path: root/contrib/restricted/abseil-cpp/absl/container/internal
author     anastasy888 <anastasy888@yandex-team.ru>      2022-02-10 16:45:54 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:54 +0300
commit     49f765d71da452ea93138a25559dfa68dd76c7f3 (patch)
tree       1016041feb637349e401dcc0fa85217dd2c2c639 /contrib/restricted/abseil-cpp/absl/container/internal
parent     7353a3fdea9c67c256980c00a2b3b67f09b23a27 (diff)
download   ydb-49f765d71da452ea93138a25559dfa68dd76c7f3.tar.gz
Restoring authorship annotation for <anastasy888@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/container/internal')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/btree.h | 3730
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h | 962
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/common.h | 380
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h | 508
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h | 818
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/counting_allocator.h | 120
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h | 276
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h | 318
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h | 364
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h | 364
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug.h | 216
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h | 162
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc | 314
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h | 402
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 54
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/have_sse.h | 80
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h | 926
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/layout.h | 1462
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/node_hash_policy.h | 176
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h | 386
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc | 88
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h | 3180
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/test_instance_tracker.h | 544
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/tracked.h | 156
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h | 940
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h | 230
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_members_test.h | 170
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h | 622
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h | 988
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h | 178
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_members_test.h | 168
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h | 372
32 files changed, 9827 insertions, 9827 deletions
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
index f636c5fc73..3c0f7066ad 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
@@ -1,104 +1,104 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A btree implementation of the STL set and map interfaces. A btree is smaller
-// and generally also faster than STL set/map (refer to the benchmarks below).
-// The red-black tree implementation of STL set/map has an overhead of 3
-// pointers (left, right and parent) plus the node color information for each
-// stored value. So a set<int32_t> consumes 40 bytes for each value stored in
-// 64-bit mode. This btree implementation stores multiple values on fixed
-// size nodes (usually 256 bytes) and doesn't store child pointers for leaf
-// nodes. The result is that a btree_set<int32_t> may use much less memory per
-// stored value. For the random insertion benchmark in btree_bench.cc, a
-// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value.
-//
-// The packing of multiple values on to each node of a btree has another effect
-// besides better space utilization: better cache locality due to fewer cache
-// lines being accessed. Better cache locality translates into faster
-// operations.
-//
-// CAVEATS
-//
-// Insertions and deletions on a btree can cause splitting, merging or
-// rebalancing of btree nodes. And even without these operations, insertions
-// and deletions on a btree will move values around within a node. In both
-// cases, the result is that insertions and deletions can invalidate iterators
-// pointing to values other than the one being inserted/deleted. Therefore, this
-// container does not provide pointer stability. This is notably different from
-// STL set/map which takes care to not invalidate iterators on insert/erase
-// except, of course, for iterators pointing to the value being erased. A
-// partial workaround when erasing is available: erase() returns an iterator
-// pointing to the item just after the one that was erased (or end() if none
-// exists).
-
-#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
-#define ABSL_CONTAINER_INTERNAL_BTREE_H_
-
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <functional>
-#include <iterator>
-#include <limits>
-#include <new>
-#include <string>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/macros.h"
-#include "absl/container/internal/common.h"
-#include "absl/container/internal/compressed_tuple.h"
-#include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/layout.h"
-#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A btree implementation of the STL set and map interfaces. A btree is smaller
+// and generally also faster than STL set/map (refer to the benchmarks below).
+// The red-black tree implementation of STL set/map has an overhead of 3
+// pointers (left, right and parent) plus the node color information for each
+// stored value. So a set<int32_t> consumes 40 bytes for each value stored in
+// 64-bit mode. This btree implementation stores multiple values on fixed
+// size nodes (usually 256 bytes) and doesn't store child pointers for leaf
+// nodes. The result is that a btree_set<int32_t> may use much less memory per
+// stored value. For the random insertion benchmark in btree_bench.cc, a
+// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value.
+//
+// The packing of multiple values on to each node of a btree has another effect
+// besides better space utilization: better cache locality due to fewer cache
+// lines being accessed. Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore, this
+// container does not provide pointer stability. This is notably different from
+// STL set/map which takes care to not invalidate iterators on insert/erase
+// except, of course, for iterators pointing to the value being erased. A
+// partial workaround when erasing is available: erase() returns an iterator
+// pointing to the item just after the one that was erased (or end() if none
+// exists).
+
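
For illustration only (this sketch is not part of the diff): the erase()
workaround described above, using the public absl::btree_set wrapper.

    #include "absl/container/btree_set.h"

    // Erase all even elements; erase() hands back the iterator just past
    // the erased item, so the loop never touches an invalidated iterator.
    void EraseEvens(absl::btree_set<int> &s) {
      for (auto it = s.begin(); it != s.end();) {
        if (*it % 2 == 0) {
          it = s.erase(it);
        } else {
          ++it;
        }
      }
    }
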
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/strings/cord.h"
-#include "absl/strings/string_view.h"
-#include "absl/types/compare.h"
-#include "absl/utility/utility.h"
-
-namespace absl {
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// A helper class that indicates if the Compare parameter is a key-compare-to
-// comparator.
-template <typename Compare, typename T>
-using btree_is_key_compare_to =
- std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
- absl::weak_ordering>;
-
-struct StringBtreeDefaultLess {
- using is_transparent = void;
-
- StringBtreeDefaultLess() = default;
-
- // Compatibility constructor.
- StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT
+namespace container_internal {
+
+// A helper class that indicates if the Compare parameter is a key-compare-to
+// comparator.
+template <typename Compare, typename T>
+using btree_is_key_compare_to =
+ std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
+ absl::weak_ordering>;
+
+struct StringBtreeDefaultLess {
+ using is_transparent = void;
+
+ StringBtreeDefaultLess() = default;
+
+ // Compatibility constructor.
+ StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT
StringBtreeDefaultLess(std::less<absl::string_view>) {} // NOLINT
-
+
// Allow converting to std::less for use in key_comp()/value_comp().
explicit operator std::less<std::string>() const { return {}; }
explicit operator std::less<absl::string_view>() const { return {}; }
explicit operator std::less<absl::Cord>() const { return {}; }
- absl::weak_ordering operator()(absl::string_view lhs,
- absl::string_view rhs) const {
- return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
- }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+ }
StringBtreeDefaultLess(std::less<absl::Cord>) {} // NOLINT
absl::weak_ordering operator()(const absl::Cord &lhs,
const absl::Cord &rhs) const {
@@ -112,25 +112,25 @@ struct StringBtreeDefaultLess {
const absl::Cord &rhs) const {
return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
}
-};
-
-struct StringBtreeDefaultGreater {
- using is_transparent = void;
-
- StringBtreeDefaultGreater() = default;
-
- StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT
+};
+
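
For illustration only (not part of the diff): the payoff of returning
absl::weak_ordering instead of bool is that a single comparison answers
less/equal/greater in one pass over both strings.

    // Hypothetical helper; assumes the absl headers included above.
    bool LessOrEqual(absl::string_view a, absl::string_view b) {
      StringBtreeDefaultLess cmp;
      const absl::weak_ordering c = cmp(a, b);  // one pass over both strings
      return c <= 0;  // c < 0: a < b; c == 0: equivalent; c > 0: a > b
    }
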
+struct StringBtreeDefaultGreater {
+ using is_transparent = void;
+
+ StringBtreeDefaultGreater() = default;
+
+ StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT
StringBtreeDefaultGreater(std::greater<absl::string_view>) {} // NOLINT
-
+
// Allow converting to std::greater for use in key_comp()/value_comp().
explicit operator std::greater<std::string>() const { return {}; }
explicit operator std::greater<absl::string_view>() const { return {}; }
explicit operator std::greater<absl::Cord>() const { return {}; }
- absl::weak_ordering operator()(absl::string_view lhs,
- absl::string_view rhs) const {
- return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
- }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+ }
StringBtreeDefaultGreater(std::greater<absl::Cord>) {} // NOLINT
absl::weak_ordering operator()(const absl::Cord &lhs,
const absl::Cord &rhs) const {
@@ -144,44 +144,44 @@ struct StringBtreeDefaultGreater {
const absl::Cord &rhs) const {
return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
}
-};
-
-// A helper class to convert a boolean comparison into a three-way "compare-to"
+};
+
+// A helper class to convert a boolean comparison into a three-way "compare-to"
// comparison that returns an `absl::weak_ordering`. This helper
-// class is specialized for less<std::string>, greater<std::string>,
+// class is specialized for less<std::string>, greater<std::string>,
// less<string_view>, greater<string_view>, less<absl::Cord>, and
// greater<absl::Cord>.
-//
-// key_compare_to_adapter is provided so that btree users
-// automatically get the more efficient compare-to code when using common
+//
+// key_compare_to_adapter is provided so that btree users
+// automatically get the more efficient compare-to code when using common
// Abseil string types with common comparison functors.
-// These string-like specializations also turn on heterogeneous lookup by
-// default.
-template <typename Compare>
-struct key_compare_to_adapter {
- using type = Compare;
-};
-
-template <>
-struct key_compare_to_adapter<std::less<std::string>> {
- using type = StringBtreeDefaultLess;
-};
-
-template <>
-struct key_compare_to_adapter<std::greater<std::string>> {
- using type = StringBtreeDefaultGreater;
-};
-
-template <>
-struct key_compare_to_adapter<std::less<absl::string_view>> {
- using type = StringBtreeDefaultLess;
-};
-
-template <>
-struct key_compare_to_adapter<std::greater<absl::string_view>> {
- using type = StringBtreeDefaultGreater;
-};
-
+// These string-like specializations also turn on heterogeneous lookup by
+// default.
+template <typename Compare>
+struct key_compare_to_adapter {
+ using type = Compare;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<std::string>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<std::string>> {
+ using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<absl::string_view>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::string_view>> {
+ using type = StringBtreeDefaultGreater;
+};
+
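
A quick sanity check of the mapping above (illustrative only, assuming
<type_traits> is available):

    static_assert(
        std::is_same<key_compare_to_adapter<std::less<std::string>>::type,
                     StringBtreeDefaultLess>::value,
        "common string comparators are swapped for the three-way version");
    static_assert(
        std::is_same<key_compare_to_adapter<std::less<int>>::type,
                     std::less<int>>::value,
        "all other comparators pass through unchanged");
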
template <>
struct key_compare_to_adapter<std::less<absl::Cord>> {
using type = StringBtreeDefaultLess;
@@ -224,32 +224,32 @@ struct prefers_linear_node_search<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: T::absl_btree_prefer_linear_node_search {};
-template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
- bool Multi, typename SlotPolicy>
-struct common_params {
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool Multi, typename SlotPolicy>
+struct common_params {
using original_key_compare = Compare;
// If Compare is a common comparator for a string-like type, then we adapt it
- // to use heterogeneous lookup and to be a key-compare-to comparator.
- using key_compare = typename key_compare_to_adapter<Compare>::type;
- // A type which indicates if we have a key-compare-to functor or a plain old
- // key-compare functor.
- using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
-
- using allocator_type = Alloc;
- using key_type = Key;
- using size_type = std::make_signed<size_t>::type;
- using difference_type = ptrdiff_t;
-
- using slot_policy = SlotPolicy;
- using slot_type = typename slot_policy::slot_type;
- using value_type = typename slot_policy::value_type;
- using init_type = typename slot_policy::mutable_value_type;
- using pointer = value_type *;
- using const_pointer = const value_type *;
- using reference = value_type &;
- using const_reference = const value_type &;
-
+ // to use heterogeneous lookup and to be a key-compare-to comparator.
+ using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // A type which indicates if we have a key-compare-to functor or a plain old
+ // key-compare functor.
+ using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+ using allocator_type = Alloc;
+ using key_type = Key;
+ using size_type = std::make_signed<size_t>::type;
+ using difference_type = ptrdiff_t;
+
+ using slot_policy = SlotPolicy;
+ using slot_type = typename slot_policy::slot_type;
+ using value_type = typename slot_policy::value_type;
+ using init_type = typename slot_policy::mutable_value_type;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+
// For the given lookup key type, returns whether we can have multiple
// equivalent keys in the btree. If this is a multi-container, then we can.
// Otherwise, we can have multiple equivalent keys only if all of the
@@ -267,74 +267,74 @@ struct common_params {
!std::is_same<key_compare, StringBtreeDefaultGreater>::value);
}
- enum {
- kTargetNodeSize = TargetNodeSize,
-
- // Upper bound for the available space for values. This is largest for leaf
- // nodes, which have overhead of at least a pointer + 4 bytes (for storing
- // 3 field_types and an enum).
- kNodeValueSpace =
- TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
- };
-
- // This is an integral type large enough to hold as many
- // ValueSize-values as will fit a node of TargetNodeSize bytes.
- using node_count_type =
- absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
- (std::numeric_limits<uint8_t>::max)()),
- uint16_t, uint8_t>; // NOLINT
-
- // The following methods are necessary for passing this struct as PolicyTraits
- // for node_handle and/or are used within btree.
- static value_type &element(slot_type *slot) {
- return slot_policy::element(slot);
- }
- static const value_type &element(const slot_type *slot) {
- return slot_policy::element(slot);
- }
- template <class... Args>
- static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
- slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
- }
- static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
- slot_policy::construct(alloc, slot, other);
- }
- static void destroy(Alloc *alloc, slot_type *slot) {
- slot_policy::destroy(alloc, slot);
- }
- static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
- construct(alloc, new_slot, old_slot);
- destroy(alloc, old_slot);
- }
- static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
- slot_policy::swap(alloc, a, b);
- }
- static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
- slot_policy::move(alloc, src, dest);
- }
-};
-
-// A parameters structure for holding the type parameters for a btree_map.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Data, typename Compare, typename Alloc,
- int TargetNodeSize, bool Multi>
-struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
- map_slot_policy<Key, Data>> {
- using super_type = typename map_params::common_params;
- using mapped_type = Data;
- // This type allows us to move keys when it is safe to do so. It is safe
- // for maps in which value_type and mutable_value_type are layout compatible.
- using slot_policy = typename super_type::slot_policy;
- using slot_type = typename super_type::slot_type;
- using value_type = typename super_type::value_type;
- using init_type = typename super_type::init_type;
-
+ enum {
+ kTargetNodeSize = TargetNodeSize,
+
+ // Upper bound for the available space for values. This is largest for leaf
+ // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+ // 3 field_types and an enum).
+ kNodeValueSpace =
+ TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+ };
+
+ // This is an integral type large enough to hold as many
+ // ValueSize-values as will fit a node of TargetNodeSize bytes.
+ using node_count_type =
+ absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+ (std::numeric_limits<uint8_t>::max)()),
+ uint16_t, uint8_t>; // NOLINT
+
+ // The following methods are necessary for passing this struct as PolicyTraits
+ // for node_handle and/or are used within btree.
+ static value_type &element(slot_type *slot) {
+ return slot_policy::element(slot);
+ }
+ static const value_type &element(const slot_type *slot) {
+ return slot_policy::element(slot);
+ }
+ template <class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+ slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ slot_policy::construct(alloc, slot, other);
+ }
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ slot_policy::destroy(alloc, slot);
+ }
+ static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+ construct(alloc, new_slot, old_slot);
+ destroy(alloc, old_slot);
+ }
+ static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
+ slot_policy::swap(alloc, a, b);
+ }
+ static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
+ slot_policy::move(alloc, src, dest);
+ }
+};
+
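
To make the constants above concrete, a rough worked example (assuming a
64-bit build, TargetNodeSize = 256, and value_type = int32_t):

    // kNodeValueSpace = 256 - (sizeof(void *) + 4) = 256 - 12 = 244 bytes.
    // 244 / sizeof(int32_t) = 61 values, and 61 <= 255, so node_count_type
    // is uint8_t and each of the four count fields costs a single byte.
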
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool Multi>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+ map_slot_policy<Key, Data>> {
+ using super_type = typename map_params::common_params;
+ using mapped_type = Data;
+ // This type allows us to move keys when it is safe to do so. It is safe
+ // for maps in which value_type and mutable_value_type are layout compatible.
+ using slot_policy = typename super_type::slot_policy;
+ using slot_type = typename super_type::slot_type;
+ using value_type = typename super_type::value_type;
+ using init_type = typename super_type::init_type;
+
using original_key_compare = typename super_type::original_key_compare;
// Reference: https://en.cppreference.com/w/cpp/container/map/value_compare
class value_compare {
template <typename Params>
friend class btree;
-
+
protected:
explicit value_compare(original_key_compare c) : comp(std::move(c)) {}
@@ -344,10 +344,10 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
auto operator()(const value_type &lhs, const value_type &rhs) const
-> decltype(comp(lhs.first, rhs.first)) {
return comp(lhs.first, rhs.first);
- }
- };
- using is_map_container = std::true_type;
-
+ }
+ };
+ using is_map_container = std::true_type;
+
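  // Illustrative usage (not part of the diff): value_compare orders map
  // entries by key only, mirroring std::map::value_compare; e.g. with
  // std::less<int> it reports {1, "b"} as ordered before {2, "a"}.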
template <typename V>
static auto key(const V &value) -> decltype(value.first) {
return value.first;
@@ -359,140 +359,140 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
-> decltype(slot_policy::mutable_key(s)) {
return slot_policy::mutable_key(s);
}
- static mapped_type &value(value_type *value) { return value->second; }
-};
-
-// This type implements the necessary functions from the
-// absl::container_internal::slot_type interface.
-template <typename Key>
-struct set_slot_policy {
- using slot_type = Key;
- using value_type = Key;
- using mutable_value_type = Key;
-
- static value_type &element(slot_type *slot) { return *slot; }
- static const value_type &element(const slot_type *slot) { return *slot; }
-
- template <typename Alloc, class... Args>
- static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
- absl::allocator_traits<Alloc>::construct(*alloc, slot,
- std::forward<Args>(args)...);
- }
-
- template <typename Alloc>
- static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
- absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
- }
-
- template <typename Alloc>
- static void destroy(Alloc *alloc, slot_type *slot) {
- absl::allocator_traits<Alloc>::destroy(*alloc, slot);
- }
-
- template <typename Alloc>
- static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
- using std::swap;
- swap(*a, *b);
- }
-
- template <typename Alloc>
- static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
- *dest = std::move(*src);
- }
-};
-
-// A parameters structure for holding the type parameters for a btree_set.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
- bool Multi>
-struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
- set_slot_policy<Key>> {
- using value_type = Key;
- using slot_type = typename set_params::common_params::slot_type;
+ static mapped_type &value(value_type *value) { return value->second; }
+};
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface.
+template <typename Key>
+struct set_slot_policy {
+ using slot_type = Key;
+ using value_type = Key;
+ using mutable_value_type = Key;
+
+ static value_type &element(slot_type *slot) { return *slot; }
+ static const value_type &element(const slot_type *slot) { return *slot; }
+
+ template <typename Alloc, class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+ }
+
+ template <typename Alloc>
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+ }
+
+ template <typename Alloc>
+ static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
+ using std::swap;
+ swap(*a, *b);
+ }
+
+ template <typename Alloc>
+ static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
+ *dest = std::move(*src);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool Multi>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+ set_slot_policy<Key>> {
+ using value_type = Key;
+ using slot_type = typename set_params::common_params::slot_type;
using value_compare =
typename set_params::common_params::original_key_compare;
- using is_map_container = std::false_type;
-
+ using is_map_container = std::false_type;
+
template <typename V>
static const V &key(const V &value) { return value; }
static const Key &key(const slot_type *slot) { return *slot; }
static const Key &key(slot_type *slot) { return *slot; }
-};
-
-// An adapter class that converts a lower-bound compare into an upper-bound
-// compare. Note: there is no need to make a version of this adapter specialized
-// for key-compare-to functors because the upper-bound (the first value greater
-// than the input) is never an exact match.
-template <typename Compare>
-struct upper_bound_adapter {
- explicit upper_bound_adapter(const Compare &c) : comp(c) {}
+};
+
+// An adapter class that converts a lower-bound compare into an upper-bound
+// compare. Note: there is no need to make a version of this adapter specialized
+// for key-compare-to functors because the upper-bound (the first value greater
+// than the input) is never an exact match.
+template <typename Compare>
+struct upper_bound_adapter {
+ explicit upper_bound_adapter(const Compare &c) : comp(c) {}
template <typename K1, typename K2>
bool operator()(const K1 &a, const K2 &b) const {
- // Returns true when a is not greater than b.
- return !compare_internal::compare_result_as_less_than(comp(b, a));
- }
-
- private:
- Compare comp;
-};
-
-enum class MatchKind : uint8_t { kEq, kNe };
-
-template <typename V, bool IsCompareTo>
-struct SearchResult {
- V value;
- MatchKind match;
-
- static constexpr bool HasMatch() { return true; }
- bool IsEq() const { return match == MatchKind::kEq; }
-};
-
-// When we don't use CompareTo, `match` is not present.
-// This ensures that callers can't use it accidentally when it provides no
-// useful information.
-template <typename V>
-struct SearchResult<V, false> {
+ // Returns true when a is not greater than b.
+ return !compare_internal::compare_result_as_less_than(comp(b, a));
+ }
+
+ private:
+ Compare comp;
+};
+
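
For illustration only (not part of the diff), the same trick restated with
the standard algorithms: a lower bound under "a is not greater than b"
lands on the first element strictly greater than the key, i.e. the upper
bound.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Hypothetical demo; returns the index of the first element > key.
    size_t UpperBoundViaAdapter(const std::vector<int> &v, int key) {
      auto not_greater = [](int a, int b) { return !(b < a); };  // a <= b
      // std::lower_bound stops at the first x with !not_greater(x, key),
      // i.e. the first x with key < x -- exactly what upper_bound returns.
      return std::lower_bound(v.begin(), v.end(), key, not_greater) -
             v.begin();
    }
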
+enum class MatchKind : uint8_t { kEq, kNe };
+
+template <typename V, bool IsCompareTo>
+struct SearchResult {
+ V value;
+ MatchKind match;
+
+ static constexpr bool HasMatch() { return true; }
+ bool IsEq() const { return match == MatchKind::kEq; }
+};
+
+// When we don't use CompareTo, `match` is not present.
+// This ensures that callers can't use it accidentally when it provides no
+// useful information.
+template <typename V>
+struct SearchResult<V, false> {
SearchResult() {}
explicit SearchResult(V value) : value(value) {}
SearchResult(V value, MatchKind /*match*/) : value(value) {}
- V value;
-
- static constexpr bool HasMatch() { return false; }
- static constexpr bool IsEq() { return false; }
-};
-
-// A node in the btree. The same node type is used for both internal
-// and leaf nodes in the btree, though the nodes are allocated in such a way
-// that the children array is only valid in internal nodes.
-template <typename Params>
-class btree_node {
- using is_key_compare_to = typename Params::is_key_compare_to;
- using field_type = typename Params::node_count_type;
- using allocator_type = typename Params::allocator_type;
- using slot_type = typename Params::slot_type;
-
- public:
- using params_type = Params;
- using key_type = typename Params::key_type;
- using value_type = typename Params::value_type;
- using pointer = typename Params::pointer;
- using const_pointer = typename Params::const_pointer;
- using reference = typename Params::reference;
- using const_reference = typename Params::const_reference;
- using key_compare = typename Params::key_compare;
- using size_type = typename Params::size_type;
- using difference_type = typename Params::difference_type;
-
- // Btree decides whether to use linear node search as follows:
+ V value;
+
+ static constexpr bool HasMatch() { return false; }
+ static constexpr bool IsEq() { return false; }
+};
+
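
For illustration only (not part of the diff): with a three-way comparator
the result carries match information; with a boolean comparator it cannot,
and IsEq() is statically false.

    #include <cassert>

    void SearchResultDemo() {
      SearchResult<int, true> with_compare_to{5, MatchKind::kEq};
      assert(with_compare_to.IsEq());  // runtime match info is available
      SearchResult<int, false> plain(5);
      static_assert(!SearchResult<int, false>::IsEq(),
                    "no match field to consult");
      (void)plain;
    }
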
+// A node in the btree. The same node type is used for both internal
+// and leaf nodes in the btree, though the nodes are allocated in such a way
+// that the children array is only valid in internal nodes.
+template <typename Params>
+class btree_node {
+ using is_key_compare_to = typename Params::is_key_compare_to;
+ using field_type = typename Params::node_count_type;
+ using allocator_type = typename Params::allocator_type;
+ using slot_type = typename Params::slot_type;
+
+ public:
+ using params_type = Params;
+ using key_type = typename Params::key_type;
+ using value_type = typename Params::value_type;
+ using pointer = typename Params::pointer;
+ using const_pointer = typename Params::const_pointer;
+ using reference = typename Params::reference;
+ using const_reference = typename Params::const_reference;
+ using key_compare = typename Params::key_compare;
+ using size_type = typename Params::size_type;
+ using difference_type = typename Params::difference_type;
+
+ // Btree decides whether to use linear node search as follows:
// - If the comparator expresses a preference, use that.
// - If the key expresses a preference, use that.
- // - If the key is arithmetic and the comparator is std::less or
- // std::greater, choose linear.
- // - Otherwise, choose binary.
- // TODO(ezb): Might make sense to add condition(s) based on node-size.
- using use_linear_search = std::integral_constant<
- bool,
+ // - If the key is arithmetic and the comparator is std::less or
+ // std::greater, choose linear.
+ // - Otherwise, choose binary.
+ // TODO(ezb): Might make sense to add condition(s) based on node-size.
+ using use_linear_search = std::integral_constant<
+ bool,
has_linear_node_search_preference<key_compare>::value
? prefers_linear_node_search<key_compare>::value
: has_linear_node_search_preference<key_type>::value
@@ -501,391 +501,391 @@ class btree_node {
(std::is_same<std::less<key_type>, key_compare>::value ||
std::is_same<std::greater<key_type>,
key_compare>::value)>;
-
+
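  // Hedged examples of how the rule above resolves (illustrative, not part
  // of the diff):
  //   key_type = int, key_compare = std::less<int>  -> linear node search
  //     (arithmetic key with std::less);
  //   key_type = std::string                        -> binary node search
  //     (not arithmetic, and no declared preference is assumed here).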
// This class is organized by absl::container_internal::Layout as if it had
// the following structure:
- // // A pointer to the node's parent.
- // btree_node *parent;
- //
- // // The position of the node in the node's parent.
- // field_type position;
- // // The index of the first populated value in `values`.
- // // TODO(ezb): right now, `start` is always 0. Update insertion/merge
- // // logic to allow for floating storage within nodes.
- // field_type start;
+ // // A pointer to the node's parent.
+ // btree_node *parent;
+ //
+ // // The position of the node in the node's parent.
+ // field_type position;
+ // // The index of the first populated value in `values`.
+ // // TODO(ezb): right now, `start` is always 0. Update insertion/merge
+ // // logic to allow for floating storage within nodes.
+ // field_type start;
// // The index after the last populated value in `values`. Currently, this
// // is the same as the count of values.
// field_type finish;
- // // The maximum number of values the node can hold. This is an integer in
+ // // The maximum number of values the node can hold. This is an integer in
// // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
- // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
+ // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
// // nodes (even though there are still kNodeSlots values in the node).
- // // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
- // // to free extra bits for is_root, etc.
- // field_type max_count;
- //
- // // The array of values. The capacity is `max_count` for leaf nodes and
+ // // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
+ // // to free extra bits for is_root, etc.
+ // field_type max_count;
+ //
+ // // The array of values. The capacity is `max_count` for leaf nodes and
// // kNodeSlots for internal nodes. Only the values in
// // [start, finish) have been initialized and are valid.
- // slot_type values[max_count];
- //
- // // The array of child pointers. The keys in children[i] are all less
- // // than key(i). The keys in children[i + 1] are all greater than key(i).
+ // slot_type values[max_count];
+ //
+ // // The array of child pointers. The keys in children[i] are all less
+ // // than key(i). The keys in children[i + 1] are all greater than key(i).
// // There are 0 children for leaf nodes and kNodeSlots + 1 children for
- // // internal nodes.
+ // // internal nodes.
// btree_node *children[kNodeSlots + 1];
- //
- // This class is only constructed by EmptyNodeType. Normally, pointers to the
- // layout above are allocated, cast to btree_node*, and de-allocated within
- // the btree implementation.
- ~btree_node() = default;
- btree_node(btree_node const &) = delete;
- btree_node &operator=(btree_node const &) = delete;
-
- // Public for EmptyNodeType.
- constexpr static size_type Alignment() {
- static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
- "Alignment of all nodes must be equal.");
- return InternalLayout().Alignment();
- }
-
- protected:
- btree_node() = default;
-
- private:
- using layout_type = absl::container_internal::Layout<btree_node *, field_type,
- slot_type, btree_node *>;
+ //
+ // This class is only constructed by EmptyNodeType. Normally, pointers to the
+ // layout above are allocated, cast to btree_node*, and de-allocated within
+ // the btree implementation.
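  // Hedged worked example (not part of the diff): for a leaf of int32_t
  // values with max_count = 61 on a 64-bit build, the layout would be
  //   parent pointer at offset 0 (8 bytes),
  //   position/start/finish/max_count at offsets 8..11 (four field_types),
  //   values[61] at offset 12 (244 bytes), for a 256-byte allocation,
  //   and no children array at all.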
+ ~btree_node() = default;
+ btree_node(btree_node const &) = delete;
+ btree_node &operator=(btree_node const &) = delete;
+
+ // Public for EmptyNodeType.
+ constexpr static size_type Alignment() {
+ static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+ "Alignment of all nodes must be equal.");
+ return InternalLayout().Alignment();
+ }
+
+ protected:
+ btree_node() = default;
+
+ private:
+ using layout_type = absl::container_internal::Layout<btree_node *, field_type,
+ slot_type, btree_node *>;
constexpr static size_type SizeWithNSlots(size_type n) {
- return layout_type(/*parent*/ 1,
+ return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
/*slots*/ n,
- /*children*/ 0)
- .AllocSize();
- }
- // A lower bound for the overhead of fields other than values in a leaf node.
- constexpr static size_type MinimumOverhead() {
+ /*children*/ 0)
+ .AllocSize();
+ }
+ // A lower bound for the overhead of fields other than values in a leaf node.
+ constexpr static size_type MinimumOverhead() {
return SizeWithNSlots(1) - sizeof(value_type);
- }
-
- // Compute how many values we can fit onto a leaf node taking into account
- // padding.
+ }
+
+ // Compute how many values we can fit onto a leaf node taking into account
+ // padding.
constexpr static size_type NodeTargetSlots(const int begin, const int end) {
- return begin == end ? begin
+ return begin == end ? begin
: SizeWithNSlots((begin + end) / 2 + 1) >
- params_type::kTargetNodeSize
+ params_type::kTargetNodeSize
? NodeTargetSlots(begin, (begin + end) / 2)
: NodeTargetSlots((begin + end) / 2 + 1, end);
- }
-
- enum {
- kTargetNodeSize = params_type::kTargetNodeSize,
+ }
+
+ enum {
+ kTargetNodeSize = params_type::kTargetNodeSize,
kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
-
+
// We need a minimum of 3 slots per internal node in order to perform
- // splitting (1 value for the two nodes involved in the split and 1 value
+ // splitting (1 value for the two nodes involved in the split and 1 value
// propagated to the parent as the delimiter for the split). For performance
// reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
// of 1/3 (for a node, not a b-tree).
kMinNodeSlots = 4,
-
+
kNodeSlots =
kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
- // The node is internal (i.e. is not a leaf node) if and only if `max_count`
- // has this value.
- kInternalNodeMaxCount = 0,
- };
-
+ // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+ // has this value.
+ kInternalNodeMaxCount = 0,
+ };
+
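  // Hedged arithmetic (not part of the diff): with TargetNodeSize = 256 and
  // 4-byte slots on a 64-bit build, SizeWithNSlots(n) is roughly 12 + 4 * n,
  // so the recursion above settles on kNodeTargetSlots = 61; that is >= 4,
  // so kNodeSlots = 61.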
// Leaves can have less than kNodeSlots values.
constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
- return layout_type(/*parent*/ 1,
+ return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
/*slots*/ slot_count,
- /*children*/ 0);
- }
- constexpr static layout_type InternalLayout() {
- return layout_type(/*parent*/ 1,
+ /*children*/ 0);
+ }
+ constexpr static layout_type InternalLayout() {
+ return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
/*slots*/ kNodeSlots,
/*children*/ kNodeSlots + 1);
- }
+ }
constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
return LeafLayout(slot_count).AllocSize();
- }
- constexpr static size_type InternalSize() {
- return InternalLayout().AllocSize();
- }
-
- // N is the index of the type in the Layout definition.
- // ElementType<N> is the Nth type in the Layout definition.
- template <size_type N>
- inline typename layout_type::template ElementType<N> *GetField() {
- // We assert that we don't read from values that aren't there.
- assert(N < 3 || !leaf());
- return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
- }
- template <size_type N>
- inline const typename layout_type::template ElementType<N> *GetField() const {
- assert(N < 3 || !leaf());
- return InternalLayout().template Pointer<N>(
- reinterpret_cast<const char *>(this));
- }
- void set_parent(btree_node *p) { *GetField<0>() = p; }
+ }
+ constexpr static size_type InternalSize() {
+ return InternalLayout().AllocSize();
+ }
+
+ // N is the index of the type in the Layout definition.
+ // ElementType<N> is the Nth type in the Layout definition.
+ template <size_type N>
+ inline typename layout_type::template ElementType<N> *GetField() {
+ // We assert that we don't read from values that aren't there.
+ assert(N < 3 || !leaf());
+ return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
+ }
+ template <size_type N>
+ inline const typename layout_type::template ElementType<N> *GetField() const {
+ assert(N < 3 || !leaf());
+ return InternalLayout().template Pointer<N>(
+ reinterpret_cast<const char *>(this));
+ }
+ void set_parent(btree_node *p) { *GetField<0>() = p; }
field_type &mutable_finish() { return GetField<1>()[2]; }
- slot_type *slot(int i) { return &GetField<2>()[i]; }
+ slot_type *slot(int i) { return &GetField<2>()[i]; }
slot_type *start_slot() { return slot(start()); }
slot_type *finish_slot() { return slot(finish()); }
- const slot_type *slot(int i) const { return &GetField<2>()[i]; }
- void set_position(field_type v) { GetField<1>()[0] = v; }
- void set_start(field_type v) { GetField<1>()[1] = v; }
+ const slot_type *slot(int i) const { return &GetField<2>()[i]; }
+ void set_position(field_type v) { GetField<1>()[0] = v; }
+ void set_start(field_type v) { GetField<1>()[1] = v; }
void set_finish(field_type v) { GetField<1>()[2] = v; }
- // This method is only called by the node init methods.
- void set_max_count(field_type v) { GetField<1>()[3] = v; }
-
- public:
- // Whether this is a leaf node or not. This value doesn't change after the
- // node is created.
- bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
-
- // Getter for the position of this node in its parent.
- field_type position() const { return GetField<1>()[0]; }
-
- // Getter for the offset of the first value in the `values` array.
+ // This method is only called by the node init methods.
+ void set_max_count(field_type v) { GetField<1>()[3] = v; }
+
+ public:
+ // Whether this is a leaf node or not. This value doesn't change after the
+ // node is created.
+ bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+
+ // Getter for the position of this node in its parent.
+ field_type position() const { return GetField<1>()[0]; }
+
+ // Getter for the offset of the first value in the `values` array.
field_type start() const {
// TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
assert(GetField<1>()[1] == 0);
return 0;
}
-
+
// Getter for the offset after the last value in the `values` array.
field_type finish() const { return GetField<1>()[2]; }
- // Getters for the number of values stored in this node.
+ // Getters for the number of values stored in this node.
field_type count() const {
assert(finish() >= start());
return finish() - start();
}
- field_type max_count() const {
- // Internal nodes have max_count==kInternalNodeMaxCount.
+ field_type max_count() const {
+ // Internal nodes have max_count==kInternalNodeMaxCount.
// Leaf nodes have max_count in [1, kNodeSlots].
- const field_type max_count = GetField<1>()[3];
- return max_count == field_type{kInternalNodeMaxCount}
+ const field_type max_count = GetField<1>()[3];
+ return max_count == field_type{kInternalNodeMaxCount}
? field_type{kNodeSlots}
- : max_count;
- }
-
- // Getter for the parent of this node.
- btree_node *parent() const { return *GetField<0>(); }
- // Getter for whether the node is the root of the tree. The parent of the
- // root of the tree is the leftmost node in the tree which is guaranteed to
- // be a leaf.
- bool is_root() const { return parent()->leaf(); }
- void make_root() {
- assert(parent()->is_root());
- set_parent(parent()->parent());
- }
-
- // Getters for the key/value at position i in the node.
- const key_type &key(int i) const { return params_type::key(slot(i)); }
- reference value(int i) { return params_type::element(slot(i)); }
- const_reference value(int i) const { return params_type::element(slot(i)); }
-
- // Getters/setter for the child at position i in the node.
- btree_node *child(int i) const { return GetField<3>()[i]; }
+ : max_count;
+ }
+
+ // Getter for the parent of this node.
+ btree_node *parent() const { return *GetField<0>(); }
+ // Getter for whether the node is the root of the tree. The parent of the
+ // root of the tree is the leftmost node in the tree which is guaranteed to
+ // be a leaf.
+ bool is_root() const { return parent()->leaf(); }
+ void make_root() {
+ assert(parent()->is_root());
+ set_parent(parent()->parent());
+ }
+
+ // Getters for the key/value at position i in the node.
+ const key_type &key(int i) const { return params_type::key(slot(i)); }
+ reference value(int i) { return params_type::element(slot(i)); }
+ const_reference value(int i) const { return params_type::element(slot(i)); }
+
+ // Getters/setter for the child at position i in the node.
+ btree_node *child(int i) const { return GetField<3>()[i]; }
btree_node *start_child() const { return child(start()); }
- btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
- void clear_child(int i) {
- absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
- }
- void set_child(int i, btree_node *c) {
- absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
- mutable_child(i) = c;
- c->set_position(i);
- }
- void init_child(int i, btree_node *c) {
- set_child(i, c);
- c->set_parent(this);
- }
-
- // Returns the position of the first value whose key is not less than k.
- template <typename K>
- SearchResult<int, is_key_compare_to::value> lower_bound(
- const K &k, const key_compare &comp) const {
- return use_linear_search::value ? linear_search(k, comp)
- : binary_search(k, comp);
- }
- // Returns the position of the first value whose key is greater than k.
- template <typename K>
- int upper_bound(const K &k, const key_compare &comp) const {
- auto upper_compare = upper_bound_adapter<key_compare>(comp);
- return use_linear_search::value ? linear_search(k, upper_compare).value
- : binary_search(k, upper_compare).value;
- }
-
- template <typename K, typename Compare>
- SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
- linear_search(const K &k, const Compare &comp) const {
+ btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
+ void clear_child(int i) {
+ absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
+ }
+ void set_child(int i, btree_node *c) {
+ absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
+ mutable_child(i) = c;
+ c->set_position(i);
+ }
+ void init_child(int i, btree_node *c) {
+ set_child(i, c);
+ c->set_parent(this);
+ }
+
+ // Returns the position of the first value whose key is not less than k.
+ template <typename K>
+ SearchResult<int, is_key_compare_to::value> lower_bound(
+ const K &k, const key_compare &comp) const {
+ return use_linear_search::value ? linear_search(k, comp)
+ : binary_search(k, comp);
+ }
+ // Returns the position of the first value whose key is greater than k.
+ template <typename K>
+ int upper_bound(const K &k, const key_compare &comp) const {
+ auto upper_compare = upper_bound_adapter<key_compare>(comp);
+ return use_linear_search::value ? linear_search(k, upper_compare).value
+ : binary_search(k, upper_compare).value;
+ }
+
+ template <typename K, typename Compare>
+ SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+ linear_search(const K &k, const Compare &comp) const {
return linear_search_impl(k, start(), finish(), comp,
- btree_is_key_compare_to<Compare, key_type>());
- }
-
- template <typename K, typename Compare>
- SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
- binary_search(const K &k, const Compare &comp) const {
+ btree_is_key_compare_to<Compare, key_type>());
+ }
+
+ template <typename K, typename Compare>
+ SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+ binary_search(const K &k, const Compare &comp) const {
return binary_search_impl(k, start(), finish(), comp,
- btree_is_key_compare_to<Compare, key_type>());
- }
-
- // Returns the position of the first value whose key is not less than k using
- // linear search performed using plain compare.
- template <typename K, typename Compare>
- SearchResult<int, false> linear_search_impl(
- const K &k, int s, const int e, const Compare &comp,
- std::false_type /* IsCompareTo */) const {
- while (s < e) {
- if (!comp(key(s), k)) {
- break;
- }
- ++s;
- }
+ btree_is_key_compare_to<Compare, key_type>());
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // linear search performed using plain compare.
+ template <typename K, typename Compare>
+ SearchResult<int, false> linear_search_impl(
+ const K &k, int s, const int e, const Compare &comp,
+ std::false_type /* IsCompareTo */) const {
+ while (s < e) {
+ if (!comp(key(s), k)) {
+ break;
+ }
+ ++s;
+ }
return SearchResult<int, false>{s};
- }
-
- // Returns the position of the first value whose key is not less than k using
- // linear search performed using compare-to.
- template <typename K, typename Compare>
- SearchResult<int, true> linear_search_impl(
- const K &k, int s, const int e, const Compare &comp,
- std::true_type /* IsCompareTo */) const {
- while (s < e) {
- const absl::weak_ordering c = comp(key(s), k);
- if (c == 0) {
- return {s, MatchKind::kEq};
- } else if (c > 0) {
- break;
- }
- ++s;
- }
- return {s, MatchKind::kNe};
- }
-
- // Returns the position of the first value whose key is not less than k using
- // binary search performed using plain compare.
- template <typename K, typename Compare>
- SearchResult<int, false> binary_search_impl(
- const K &k, int s, int e, const Compare &comp,
- std::false_type /* IsCompareTo */) const {
- while (s != e) {
- const int mid = (s + e) >> 1;
- if (comp(key(mid), k)) {
- s = mid + 1;
- } else {
- e = mid;
- }
- }
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // linear search performed using compare-to.
+ template <typename K, typename Compare>
+ SearchResult<int, true> linear_search_impl(
+ const K &k, int s, const int e, const Compare &comp,
+ std::true_type /* IsCompareTo */) const {
+ while (s < e) {
+ const absl::weak_ordering c = comp(key(s), k);
+ if (c == 0) {
+ return {s, MatchKind::kEq};
+ } else if (c > 0) {
+ break;
+ }
+ ++s;
+ }
+ return {s, MatchKind::kNe};
+ }
+
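  // Hedged trace (not part of the diff) of the compare-to loop above, for
  // keys [1, 3, 5] and k = 3: comp(1, 3) < 0 so ++s; comp(3, 3) == 0 so it
  // returns {1, MatchKind::kEq}. One comparator call per element answers
  // both "found it" and "went past it", where a boolean comparator needs
  // two calls.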
+ // Returns the position of the first value whose key is not less than k using
+ // binary search performed using plain compare.
+ template <typename K, typename Compare>
+ SearchResult<int, false> binary_search_impl(
+ const K &k, int s, int e, const Compare &comp,
+ std::false_type /* IsCompareTo */) const {
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ if (comp(key(mid), k)) {
+ s = mid + 1;
+ } else {
+ e = mid;
+ }
+ }
return SearchResult<int, false>{s};
- }
-
- // Returns the position of the first value whose key is not less than k using
- // binary search performed using compare-to.
- template <typename K, typename CompareTo>
- SearchResult<int, true> binary_search_impl(
- const K &k, int s, int e, const CompareTo &comp,
- std::true_type /* IsCompareTo */) const {
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // binary search performed using compare-to.
+ template <typename K, typename CompareTo>
+ SearchResult<int, true> binary_search_impl(
+ const K &k, int s, int e, const CompareTo &comp,
+ std::true_type /* IsCompareTo */) const {
if (params_type::template can_have_multiple_equivalent_keys<K>()) {
- MatchKind exact_match = MatchKind::kNe;
- while (s != e) {
- const int mid = (s + e) >> 1;
- const absl::weak_ordering c = comp(key(mid), k);
- if (c < 0) {
- s = mid + 1;
- } else {
- e = mid;
- if (c == 0) {
- // Need to return the first value whose key is not less than k,
+ MatchKind exact_match = MatchKind::kNe;
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ const absl::weak_ordering c = comp(key(mid), k);
+ if (c < 0) {
+ s = mid + 1;
+ } else {
+ e = mid;
+ if (c == 0) {
+ // Need to return the first value whose key is not less than k,
// which requires continuing the binary search if there could be
// multiple equivalent keys.
- exact_match = MatchKind::kEq;
- }
- }
- }
- return {s, exact_match};
+ exact_match = MatchKind::kEq;
+ }
+ }
+ }
+ return {s, exact_match};
} else { // Can't have multiple equivalent keys.
- while (s != e) {
- const int mid = (s + e) >> 1;
- const absl::weak_ordering c = comp(key(mid), k);
- if (c < 0) {
- s = mid + 1;
- } else if (c > 0) {
- e = mid;
- } else {
- return {mid, MatchKind::kEq};
- }
- }
- return {s, MatchKind::kNe};
- }
- }
-
- // Emplaces a value at position i, shifting all existing values and
- // children at positions >= i to the right by 1.
- template <typename... Args>
- void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
-
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ const absl::weak_ordering c = comp(key(mid), k);
+ if (c < 0) {
+ s = mid + 1;
+ } else if (c > 0) {
+ e = mid;
+ } else {
+ return {mid, MatchKind::kEq};
+ }
+ }
+ return {s, MatchKind::kNe};
+ }
+ }
+
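  // Hedged trace (not part of the diff) of the unique-keys branch above,
  // for keys [1, 3, 5, 7] and k = 5 with s = 0, e = 4: mid = 2,
  // comp(5, 5) == 0, so it returns {2, MatchKind::kEq} without narrowing
  // further; the multi-key branch must keep bisecting to find the *first*
  // equivalent key.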
+ // Emplaces a value at position i, shifting all existing values and
+ // children at positions >= i to the right by 1.
+ template <typename... Args>
+ void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+
// Removes the values at positions [i, i + to_erase), shifting all existing
// values and children after that range to the left by to_erase. Clears all
// children between [i, i + to_erase).
void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
-
- // Rebalances a node with its right sibling.
- void rebalance_right_to_left(int to_move, btree_node *right,
- allocator_type *alloc);
- void rebalance_left_to_right(int to_move, btree_node *right,
- allocator_type *alloc);
-
- // Splits a node, moving a portion of the node's values to its right sibling.
- void split(int insert_position, btree_node *dest, allocator_type *alloc);
-
- // Merges a node with its right sibling, moving all of the values and the
+
+ // Rebalances a node with its right sibling.
+ void rebalance_right_to_left(int to_move, btree_node *right,
+ allocator_type *alloc);
+ void rebalance_left_to_right(int to_move, btree_node *right,
+ allocator_type *alloc);
+
+ // Splits a node, moving a portion of the node's values to its right sibling.
+ void split(int insert_position, btree_node *dest, allocator_type *alloc);
+
+ // Merges a node with its right sibling, moving all of the values and the
// delimiting key in the parent node onto itself, and deleting the src node.
void merge(btree_node *src, allocator_type *alloc);
-
- // Node allocation/deletion routines.
+
+ // Node allocation/deletion routines.
void init_leaf(btree_node *parent, int max_count) {
set_parent(parent);
set_position(0);
set_start(0);
set_finish(0);
set_max_count(max_count);
- absl::container_internal::SanitizerPoisonMemoryRegion(
+ absl::container_internal::SanitizerPoisonMemoryRegion(
start_slot(), max_count * sizeof(slot_type));
- }
+ }
void init_internal(btree_node *parent) {
init_leaf(parent, kNodeSlots);
- // Set `max_count` to a sentinel value to indicate that this node is
- // internal.
+ // Set `max_count` to a sentinel value to indicate that this node is
+ // internal.
set_max_count(kInternalNodeMaxCount);
- absl::container_internal::SanitizerPoisonMemoryRegion(
+ absl::container_internal::SanitizerPoisonMemoryRegion(
&mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
- }
+ }
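// Illustration (sketch, not the accessors defined earlier in this header):
// storing the `kInternalNodeMaxCount` sentinel in `max_count` lets
// leaf-vs-internal be answered with a single comparison and no extra flag
// byte, roughly:
//
//   bool is_internal(const btree_node *n) {
//     return n->raw_max_count() == kInternalNodeMaxCount;  // hypothetical getter
//   }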
static void deallocate(const size_type size, btree_node *node,
allocator_type *alloc) {
absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
- }
-
+ }
+
// Deletes a node and all of its children.
static void clear_and_delete(btree_node *node, allocator_type *alloc);
- private:
- template <typename... Args>
+ private:
+ template <typename... Args>
void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
- absl::container_internal::SanitizerUnpoisonObject(slot(i));
- params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
- }
+ absl::container_internal::SanitizerUnpoisonObject(slot(i));
+ params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
+ }
void value_destroy(const field_type i, allocator_type *alloc) {
- params_type::destroy(alloc, slot(i));
- absl::container_internal::SanitizerPoisonObject(slot(i));
- }
+ params_type::destroy(alloc, slot(i));
+ absl::container_internal::SanitizerPoisonObject(slot(i));
+ }
void value_destroy_n(const field_type i, const field_type n,
allocator_type *alloc) {
for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
@@ -893,7 +893,7 @@ class btree_node {
absl::container_internal::SanitizerPoisonObject(s);
}
}
-
+
static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
absl::container_internal::SanitizerUnpoisonObject(dest);
params_type::transfer(alloc, dest, src);
@@ -913,11 +913,11 @@ class btree_node {
allocator_type *alloc) {
for (slot_type *src = src_node->slot(src_i), *end = src + n,
*dest = slot(dest_i);
- src != end; ++src, ++dest) {
+ src != end; ++src, ++dest) {
transfer(dest, src, alloc);
- }
- }
-
+ }
+ }
+
// Same as above, except that we start at the end and work our way to the
// beginning.
void transfer_n_backward(const size_type n, const size_type dest_i,
@@ -927,267 +927,267 @@ class btree_node {
*dest = slot(dest_i + n - 1);
src != end; --src, --dest) {
transfer(dest, src, alloc);
- }
- }
-
- template <typename P>
- friend class btree;
- template <typename N, typename R, typename P>
- friend struct btree_iterator;
- friend class BtreeNodePeer;
-};
-
-template <typename Node, typename Reference, typename Pointer>
-struct btree_iterator {
- private:
- using key_type = typename Node::key_type;
- using size_type = typename Node::size_type;
- using params_type = typename Node::params_type;
+ }
+ }
+
+ template <typename P>
+ friend class btree;
+ template <typename N, typename R, typename P>
+ friend struct btree_iterator;
+ friend class BtreeNodePeer;
+};
+
+template <typename Node, typename Reference, typename Pointer>
+struct btree_iterator {
+ private:
+ using key_type = typename Node::key_type;
+ using size_type = typename Node::size_type;
+ using params_type = typename Node::params_type;
using is_map_container = typename params_type::is_map_container;
-
- using node_type = Node;
- using normal_node = typename std::remove_const<Node>::type;
- using const_node = const Node;
- using normal_pointer = typename params_type::pointer;
- using normal_reference = typename params_type::reference;
- using const_pointer = typename params_type::const_pointer;
- using const_reference = typename params_type::const_reference;
- using slot_type = typename params_type::slot_type;
-
- using iterator =
+
+ using node_type = Node;
+ using normal_node = typename std::remove_const<Node>::type;
+ using const_node = const Node;
+ using normal_pointer = typename params_type::pointer;
+ using normal_reference = typename params_type::reference;
+ using const_pointer = typename params_type::const_pointer;
+ using const_reference = typename params_type::const_reference;
+ using slot_type = typename params_type::slot_type;
+
+ using iterator =
btree_iterator<normal_node, normal_reference, normal_pointer>;
- using const_iterator =
- btree_iterator<const_node, const_reference, const_pointer>;
-
- public:
- // These aliases are public for std::iterator_traits.
- using difference_type = typename Node::difference_type;
- using value_type = typename params_type::value_type;
- using pointer = Pointer;
- using reference = Reference;
- using iterator_category = std::bidirectional_iterator_tag;
-
- btree_iterator() : node(nullptr), position(-1) {}
+ using const_iterator =
+ btree_iterator<const_node, const_reference, const_pointer>;
+
+ public:
+ // These aliases are public for std::iterator_traits.
+ using difference_type = typename Node::difference_type;
+ using value_type = typename params_type::value_type;
+ using pointer = Pointer;
+ using reference = Reference;
+ using iterator_category = std::bidirectional_iterator_tag;
+
+ btree_iterator() : node(nullptr), position(-1) {}
explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
- btree_iterator(Node *n, int p) : node(n), position(p) {}
-
- // NOTE: this SFINAE allows for implicit conversions from iterator to
+ btree_iterator(Node *n, int p) : node(n), position(p) {}
+
+ // NOTE: this SFINAE allows for implicit conversions from iterator to
// const_iterator, but it specifically avoids hiding the copy constructor so
// that the trivial one will be used when possible.
- template <typename N, typename R, typename P,
- absl::enable_if_t<
- std::is_same<btree_iterator<N, R, P>, iterator>::value &&
- std::is_same<btree_iterator, const_iterator>::value,
- int> = 0>
+ template <typename N, typename R, typename P,
+ absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, iterator>::value &&
+ std::is_same<btree_iterator, const_iterator>::value,
+ int> = 0>
btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
: node(other.node), position(other.position) {}
-
- private:
- // This SFINAE allows explicit conversions from const_iterator to
+
+ private:
+ // This SFINAE allows explicit conversions from const_iterator to
// iterator, but also avoids hiding the copy constructor.
- // NOTE: the const_cast is safe because this constructor is only called by
- // non-const methods and the container owns the nodes.
- template <typename N, typename R, typename P,
- absl::enable_if_t<
- std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
- std::is_same<btree_iterator, iterator>::value,
- int> = 0>
+ // NOTE: the const_cast is safe because this constructor is only called by
+ // non-const methods and the container owns the nodes.
+ template <typename N, typename R, typename P,
+ absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+ std::is_same<btree_iterator, iterator>::value,
+ int> = 0>
explicit btree_iterator(const btree_iterator<N, R, P> other)
: node(const_cast<node_type *>(other.node)), position(other.position) {}
-
- // Increment/decrement the iterator.
- void increment() {
+
+ // Increment/decrement the iterator.
+ void increment() {
if (node->leaf() && ++position < node->finish()) {
- return;
- }
- increment_slow();
- }
- void increment_slow();
-
- void decrement() {
+ return;
+ }
+ increment_slow();
+ }
+ void increment_slow();
+
+ void decrement() {
if (node->leaf() && --position >= node->start()) {
- return;
- }
- decrement_slow();
- }
- void decrement_slow();
-
- public:
+ return;
+ }
+ decrement_slow();
+ }
+ void decrement_slow();
+
+ public:
bool operator==(const iterator &other) const {
return node == other.node && position == other.position;
- }
+ }
bool operator==(const const_iterator &other) const {
return node == other.node && position == other.position;
- }
+ }
bool operator!=(const iterator &other) const {
return node != other.node || position != other.position;
}
bool operator!=(const const_iterator &other) const {
return node != other.node || position != other.position;
}
-
- // Accessors for the key/value the iterator is pointing at.
- reference operator*() const {
+
+ // Accessors for the key/value the iterator is pointing at.
+ reference operator*() const {
ABSL_HARDENING_ASSERT(node != nullptr);
ABSL_HARDENING_ASSERT(node->start() <= position);
ABSL_HARDENING_ASSERT(node->finish() > position);
- return node->value(position);
- }
+ return node->value(position);
+ }
pointer operator->() const { return &operator*(); }
-
+
btree_iterator &operator++() {
- increment();
- return *this;
- }
+ increment();
+ return *this;
+ }
btree_iterator &operator--() {
- decrement();
- return *this;
- }
- btree_iterator operator++(int) {
- btree_iterator tmp = *this;
- ++*this;
- return tmp;
- }
- btree_iterator operator--(int) {
- btree_iterator tmp = *this;
- --*this;
- return tmp;
- }
-
- private:
+ decrement();
+ return *this;
+ }
+ btree_iterator operator++(int) {
+ btree_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+ btree_iterator operator--(int) {
+ btree_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+
+ private:
friend iterator;
friend const_iterator;
- template <typename Params>
- friend class btree;
- template <typename Tree>
- friend class btree_container;
- template <typename Tree>
- friend class btree_set_container;
- template <typename Tree>
- friend class btree_map_container;
- template <typename Tree>
- friend class btree_multiset_container;
- template <typename TreeType, typename CheckerType>
- friend class base_checker;
-
- const key_type &key() const { return node->key(position); }
- slot_type *slot() { return node->slot(position); }
-
- // The node in the tree the iterator is pointing at.
- Node *node;
- // The position within the node of the tree the iterator is pointing at.
+ template <typename Params>
+ friend class btree;
+ template <typename Tree>
+ friend class btree_container;
+ template <typename Tree>
+ friend class btree_set_container;
+ template <typename Tree>
+ friend class btree_map_container;
+ template <typename Tree>
+ friend class btree_multiset_container;
+ template <typename TreeType, typename CheckerType>
+ friend class base_checker;
+
+ const key_type &key() const { return node->key(position); }
+ slot_type *slot() { return node->slot(position); }
+
+ // The node in the tree the iterator is pointing at.
+ Node *node;
+ // The position within the node of the tree the iterator is pointing at.
// NOTE: this is an int rather than a field_type because iterators can point
// to invalid positions (such as -1) in certain circumstances.
- int position;
-};
-
-template <typename Params>
-class btree {
- using node_type = btree_node<Params>;
- using is_key_compare_to = typename Params::is_key_compare_to;
+ int position;
+};
+
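// Usage sketch (illustrative, not part of this header): because
// btree_iterator is bidirectional, the public containers built on it support
// std::prev/std::next and reverse traversal, e.g.:
//
//   absl::btree_set<int> s = {3, 1, 2};
//   for (auto it = s.rbegin(); it != s.rend(); ++it) {
//     // Visits 3, then 2, then 1.
//   }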
+template <typename Params>
+class btree {
+ using node_type = btree_node<Params>;
+ using is_key_compare_to = typename Params::is_key_compare_to;
using init_type = typename Params::init_type;
using field_type = typename node_type::field_type;
-
- // We use a static empty node for the root/leftmost/rightmost of empty btrees
- // in order to avoid branching in begin()/end().
- struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
- using field_type = typename node_type::field_type;
- node_type *parent;
- field_type position = 0;
- field_type start = 0;
+
+ // We use a static empty node for the root/leftmost/rightmost of empty btrees
+ // in order to avoid branching in begin()/end().
+ struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
+ using field_type = typename node_type::field_type;
+ node_type *parent;
+ field_type position = 0;
+ field_type start = 0;
field_type finish = 0;
- // max_count must be != kInternalNodeMaxCount (so that this node is regarded
- // as a leaf node). max_count() is never called when the tree is empty.
- field_type max_count = node_type::kInternalNodeMaxCount + 1;
-
-#ifdef _MSC_VER
-  // MSVC has constexpr code generation bugs here.
- EmptyNodeType() : parent(this) {}
-#else
- constexpr EmptyNodeType(node_type *p) : parent(p) {}
-#endif
- };
-
- static node_type *EmptyNode() {
-#ifdef _MSC_VER
+ // max_count must be != kInternalNodeMaxCount (so that this node is regarded
+ // as a leaf node). max_count() is never called when the tree is empty.
+ field_type max_count = node_type::kInternalNodeMaxCount + 1;
+
+#ifdef _MSC_VER
+  // MSVC has constexpr code generation bugs here.
+ EmptyNodeType() : parent(this) {}
+#else
+ constexpr EmptyNodeType(node_type *p) : parent(p) {}
+#endif
+ };
+
+ static node_type *EmptyNode() {
+#ifdef _MSC_VER
static EmptyNodeType *empty_node = new EmptyNodeType;
- // This assert fails on some other construction methods.
- assert(empty_node->parent == empty_node);
- return empty_node;
-#else
- static constexpr EmptyNodeType empty_node(
- const_cast<EmptyNodeType *>(&empty_node));
- return const_cast<EmptyNodeType *>(&empty_node);
-#endif
- }
-
+ // This assert fails on some other construction methods.
+ assert(empty_node->parent == empty_node);
+ return empty_node;
+#else
+ static constexpr EmptyNodeType empty_node(
+ const_cast<EmptyNodeType *>(&empty_node));
+ return const_cast<EmptyNodeType *>(&empty_node);
+#endif
+ }
+
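// Illustration: since an empty btree's root, leftmost, and rightmost all
// point at this shared sentinel, begin() and end() below compile down to
// plain loads with no null or emptiness checks — begin() can always
// dereference root()->parent() (the leftmost pointer), even when the tree
// is empty.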
enum : uint32_t {
kNodeSlots = node_type::kNodeSlots,
kMinNodeValues = kNodeSlots / 2,
- };
-
- struct node_stats {
- using size_type = typename Params::size_type;
-
+ };
+
+ struct node_stats {
+ using size_type = typename Params::size_type;
+
node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
-
+
node_stats &operator+=(const node_stats &other) {
leaf_nodes += other.leaf_nodes;
internal_nodes += other.internal_nodes;
- return *this;
- }
-
- size_type leaf_nodes;
- size_type internal_nodes;
- };
-
- public:
- using key_type = typename Params::key_type;
- using value_type = typename Params::value_type;
- using size_type = typename Params::size_type;
- using difference_type = typename Params::difference_type;
- using key_compare = typename Params::key_compare;
+ return *this;
+ }
+
+ size_type leaf_nodes;
+ size_type internal_nodes;
+ };
+
+ public:
+ using key_type = typename Params::key_type;
+ using value_type = typename Params::value_type;
+ using size_type = typename Params::size_type;
+ using difference_type = typename Params::difference_type;
+ using key_compare = typename Params::key_compare;
using original_key_compare = typename Params::original_key_compare;
- using value_compare = typename Params::value_compare;
- using allocator_type = typename Params::allocator_type;
- using reference = typename Params::reference;
- using const_reference = typename Params::const_reference;
- using pointer = typename Params::pointer;
- using const_pointer = typename Params::const_pointer;
+ using value_compare = typename Params::value_compare;
+ using allocator_type = typename Params::allocator_type;
+ using reference = typename Params::reference;
+ using const_reference = typename Params::const_reference;
+ using pointer = typename Params::pointer;
+ using const_pointer = typename Params::const_pointer;
using iterator =
typename btree_iterator<node_type, reference, pointer>::iterator;
- using const_iterator = typename iterator::const_iterator;
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- using node_handle_type = node_handle<Params, Params, allocator_type>;
-
- // Internal types made public for use by btree_container types.
- using params_type = Params;
- using slot_type = typename Params::slot_type;
-
- private:
- // For use in copy_or_move_values_in_order.
+ using const_iterator = typename iterator::const_iterator;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using node_handle_type = node_handle<Params, Params, allocator_type>;
+
+ // Internal types made public for use by btree_container types.
+ using params_type = Params;
+ using slot_type = typename Params::slot_type;
+
+ private:
+ // For use in copy_or_move_values_in_order.
const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
value_type &&maybe_move_from_iterator(iterator it) {
// This is a destructive operation on the other container so it's safe for
// us to const_cast and move from the keys here even if it's a set.
return std::move(const_cast<value_type &>(*it));
}
-
- // Copies or moves (depending on the template parameter) the values in
+
+ // Copies or moves (depending on the template parameter) the values in
// other into this btree in their order in other. This btree must be empty
// before this method is called. This method is used in copy construction,
// copy assignment, and move assignment.
- template <typename Btree>
+ template <typename Btree>
void copy_or_move_values_in_order(Btree &other);
-
- // Validates that various assumptions/requirements are true at compile time.
- constexpr static bool static_assert_validation();
-
- public:
+
+ // Validates that various assumptions/requirements are true at compile time.
+ constexpr static bool static_assert_validation();
+
+ public:
btree(const key_compare &comp, const allocator_type &alloc)
: root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
-
+
btree(const btree &other) : btree(other, other.allocator()) {}
btree(const btree &other, const allocator_type &alloc)
: btree(other.key_comp(), alloc) {
@@ -1198,7 +1198,7 @@ class btree {
rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
size_(absl::exchange(other.size_, 0)) {
other.mutable_root() = EmptyNode();
- }
+ }
btree(btree &&other, const allocator_type &alloc)
: btree(other.key_comp(), alloc) {
if (alloc == other.allocator()) {
@@ -1208,87 +1208,87 @@ class btree {
copy_or_move_values_in_order(other);
}
}
-
- ~btree() {
-    // Put the static_asserts in the destructor to avoid triggering them
-    // before the type is complete.
- static_assert(static_assert_validation(), "This call must be elided.");
- clear();
- }
-
+
+ ~btree() {
+    // Put the static_asserts in the destructor to avoid triggering them
+    // before the type is complete.
+ static_assert(static_assert_validation(), "This call must be elided.");
+ clear();
+ }
+
// Assign the contents of other to *this.
btree &operator=(const btree &other);
btree &operator=(btree &&other) noexcept;
-
+
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
iterator end() { return iterator(rightmost_, rightmost_->finish()); }
- const_iterator end() const {
+ const_iterator end() const {
return const_iterator(rightmost_, rightmost_->finish());
- }
+ }
reverse_iterator rbegin() { return reverse_iterator(end()); }
- const_reverse_iterator rbegin() const {
- return const_reverse_iterator(end());
- }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator rend() const {
- return const_reverse_iterator(begin());
- }
-
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
// Finds the first element whose key is not less than `key`.
- template <typename K>
- iterator lower_bound(const K &key) {
+ template <typename K>
+ iterator lower_bound(const K &key) {
return internal_end(internal_lower_bound(key).value);
- }
- template <typename K>
- const_iterator lower_bound(const K &key) const {
+ }
+ template <typename K>
+ const_iterator lower_bound(const K &key) const {
return internal_end(internal_lower_bound(key).value);
- }
-
+ }
+
// Finds the first element whose key is not less than `key` and also returns
// whether that element is equal to `key`.
- template <typename K>
+ template <typename K>
std::pair<iterator, bool> lower_bound_equal(const K &key) const;
// Finds the first element whose key is greater than `key`.
template <typename K>
- iterator upper_bound(const K &key) {
- return internal_end(internal_upper_bound(key));
- }
- template <typename K>
- const_iterator upper_bound(const K &key) const {
- return internal_end(internal_upper_bound(key));
- }
-
- // Finds the range of values which compare equal to key. The first member of
+ iterator upper_bound(const K &key) {
+ return internal_end(internal_upper_bound(key));
+ }
+ template <typename K>
+ const_iterator upper_bound(const K &key) const {
+ return internal_end(internal_upper_bound(key));
+ }
+
+ // Finds the range of values which compare equal to key. The first member of
// the returned pair is equal to lower_bound(key). The second member of the
// pair is equal to upper_bound(key).
- template <typename K>
+ template <typename K>
std::pair<iterator, iterator> equal_range(const K &key);
- template <typename K>
- std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+ template <typename K>
+ std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
return const_cast<btree *>(this)->equal_range(key);
- }
-
- // Inserts a value into the btree only if it does not already exist. The
- // boolean return value indicates whether insertion succeeded or failed.
- // Requirement: if `key` already exists in the btree, does not consume `args`.
- // Requirement: `key` is never referenced after consuming `args`.
+ }
+
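// Usage sketch (illustrative): the `template <typename K>` overloads above
// are what enable heterogeneous lookup in the public containers when the
// comparator is transparent, e.g.:
//
//   absl::btree_set<std::string, std::less<>> s = {"apple", "banana"};
//   auto it = s.lower_bound(absl::string_view("b"));  // no std::string temporary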
+ // Inserts a value into the btree only if it does not already exist. The
+ // boolean return value indicates whether insertion succeeded or failed.
+ // Requirement: if `key` already exists in the btree, does not consume `args`.
+ // Requirement: `key` is never referenced after consuming `args`.
template <typename K, typename... Args>
std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
-
- // Inserts with hint. Checks to see if the value should be placed immediately
- // before `position` in the tree. If so, then the insertion will take
- // amortized constant time. If not, the insertion will take amortized
- // logarithmic time as if a call to insert_unique() were made.
- // Requirement: if `key` already exists in the btree, does not consume `args`.
- // Requirement: `key` is never referenced after consuming `args`.
+
+ // Inserts with hint. Checks to see if the value should be placed immediately
+ // before `position` in the tree. If so, then the insertion will take
+ // amortized constant time. If not, the insertion will take amortized
+ // logarithmic time as if a call to insert_unique() were made.
+ // Requirement: if `key` already exists in the btree, does not consume `args`.
+ // Requirement: `key` is never referenced after consuming `args`.
template <typename K, typename... Args>
- std::pair<iterator, bool> insert_hint_unique(iterator position,
+ std::pair<iterator, bool> insert_hint_unique(iterator position,
const K &key,
- Args &&... args);
-
- // Insert a range of values into the btree.
+ Args &&... args);
+
+ // Insert a range of values into the btree.
// Note: the first overload avoids constructing a value_type if the key
// already exists in the btree.
template <typename InputIterator,
@@ -1298,313 +1298,313 @@ class btree {
void insert_iterator_unique(InputIterator b, InputIterator e, int);
// We need the second overload for cases in which we need to construct a
// value_type in order to compare it with the keys already in the btree.
- template <typename InputIterator>
+ template <typename InputIterator>
void insert_iterator_unique(InputIterator b, InputIterator e, char);
-
- // Inserts a value into the btree.
- template <typename ValueType>
- iterator insert_multi(const key_type &key, ValueType &&v);
-
- // Inserts a value into the btree.
- template <typename ValueType>
- iterator insert_multi(ValueType &&v) {
- return insert_multi(params_type::key(v), std::forward<ValueType>(v));
- }
-
- // Insert with hint. Check to see if the value should be placed immediately
-  // before position in the tree. If so, then the insertion will take
- // amortized constant time. If not, the insertion will take amortized
- // logarithmic time as if a call to insert_multi(v) were made.
- template <typename ValueType>
- iterator insert_hint_multi(iterator position, ValueType &&v);
-
- // Insert a range of values into the btree.
- template <typename InputIterator>
- void insert_iterator_multi(InputIterator b, InputIterator e);
-
-  // Erases the element referred to by the given iterator. The iterator must
-  // be valid (i.e. not equal to end()). Returns an iterator pointing to the
-  // element after the one that was erased (or end() if none exists).
- // Requirement: does not read the value at `*iter`.
- iterator erase(iterator iter);
-
-  // Erases the range [begin, end). Returns the number of keys erased and an
-  // iterator pointing to the element after the last erased one.
+
+ // Inserts a value into the btree.
+ template <typename ValueType>
+ iterator insert_multi(const key_type &key, ValueType &&v);
+
+ // Inserts a value into the btree.
+ template <typename ValueType>
+ iterator insert_multi(ValueType &&v) {
+ return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+ }
+
+ // Insert with hint. Check to see if the value should be placed immediately
+  // before position in the tree. If so, then the insertion will take
+ // amortized constant time. If not, the insertion will take amortized
+ // logarithmic time as if a call to insert_multi(v) were made.
+ template <typename ValueType>
+ iterator insert_hint_multi(iterator position, ValueType &&v);
+
+ // Insert a range of values into the btree.
+ template <typename InputIterator>
+ void insert_iterator_multi(InputIterator b, InputIterator e);
+
+  // Erases the element referred to by the given iterator. The iterator must
+  // be valid (i.e. not equal to end()). Returns an iterator pointing to the
+  // element after the one that was erased (or end() if none exists).
+ // Requirement: does not read the value at `*iter`.
+ iterator erase(iterator iter);
+
+  // Erases the range [begin, end). Returns the number of keys erased and an
+  // iterator pointing to the element after the last erased one.
std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
-
+
// Finds an element with key equivalent to `key` or returns `end()` if `key`
// is not present.
- template <typename K>
- iterator find(const K &key) {
- return internal_end(internal_find(key));
- }
- template <typename K>
- const_iterator find(const K &key) const {
- return internal_end(internal_find(key));
- }
-
- // Clear the btree, deleting all of the values it contains.
- void clear();
-
+ template <typename K>
+ iterator find(const K &key) {
+ return internal_end(internal_find(key));
+ }
+ template <typename K>
+ const_iterator find(const K &key) const {
+ return internal_end(internal_find(key));
+ }
+
+ // Clear the btree, deleting all of the values it contains.
+ void clear();
+
// Swaps the contents of `this` and `other`.
void swap(btree &other);
-
- const key_compare &key_comp() const noexcept {
- return root_.template get<0>();
- }
+
+ const key_compare &key_comp() const noexcept {
+ return root_.template get<0>();
+ }
template <typename K1, typename K2>
bool compare_keys(const K1 &a, const K2 &b) const {
return compare_internal::compare_result_as_less_than(key_comp()(a, b));
- }
-
+ }
+
value_compare value_comp() const {
return value_compare(original_key_compare(key_comp()));
}
-
- // Verifies the structure of the btree.
- void verify() const;
-
- // Size routines.
- size_type size() const { return size_; }
- size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
- bool empty() const { return size_ == 0; }
-
- // The height of the btree. An empty tree will have height 0.
- size_type height() const {
- size_type h = 0;
- if (!empty()) {
- // Count the length of the chain from the leftmost node up to the
- // root. We actually count from the root back around to the level below
- // the root, but the calculation is the same because of the circularity
- // of that traversal.
- const node_type *n = root();
- do {
- ++h;
- n = n->parent();
- } while (n != root());
- }
- return h;
- }
-
- // The number of internal, leaf and total nodes used by the btree.
+
+ // Verifies the structure of the btree.
+ void verify() const;
+
+ // Size routines.
+ size_type size() const { return size_; }
+ size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
+ bool empty() const { return size_ == 0; }
+
+ // The height of the btree. An empty tree will have height 0.
+ size_type height() const {
+ size_type h = 0;
+ if (!empty()) {
+ // Count the length of the chain from the leftmost node up to the
+ // root. We actually count from the root back around to the level below
+ // the root, but the calculation is the same because of the circularity
+ // of that traversal.
+ const node_type *n = root();
+ do {
+ ++h;
+ n = n->parent();
+ } while (n != root());
+ }
+ return h;
+ }
+
+ // The number of internal, leaf and total nodes used by the btree.
size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
- size_type internal_nodes() const {
- return internal_stats(root()).internal_nodes;
- }
- size_type nodes() const {
- node_stats stats = internal_stats(root());
- return stats.leaf_nodes + stats.internal_nodes;
- }
-
- // The total number of bytes used by the btree.
- size_type bytes_used() const {
- node_stats stats = internal_stats(root());
- if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
+ size_type internal_nodes() const {
+ return internal_stats(root()).internal_nodes;
+ }
+ size_type nodes() const {
+ node_stats stats = internal_stats(root());
+ return stats.leaf_nodes + stats.internal_nodes;
+ }
+
+ // The total number of bytes used by the btree.
+ size_type bytes_used() const {
+ node_stats stats = internal_stats(root());
+ if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
return sizeof(*this) + node_type::LeafSize(root()->max_count());
- } else {
+ } else {
return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
- stats.internal_nodes * node_type::InternalSize();
- }
- }
-
+ stats.internal_nodes * node_type::InternalSize();
+ }
+ }
+
// The average number of bytes used per value stored in the btree assuming
// random insertion order.
- static double average_bytes_per_value() {
+ static double average_bytes_per_value() {
// The expected number of values per node with random insertion order is the
// average of the maximum and minimum numbers of values per node.
const double expected_values_per_node =
(kNodeSlots + kMinNodeValues) / 2.0;
return node_type::LeafSize() / expected_values_per_node;
- }
-
- // The fullness of the btree. Computed as the number of elements in the btree
- // divided by the maximum number of elements a tree with the current number
- // of nodes could hold. A value of 1 indicates perfect space
- // utilization. Smaller values indicate space wastage.
- // Returns 0 for empty trees.
- double fullness() const {
- if (empty()) return 0.0;
+ }
+
+ // The fullness of the btree. Computed as the number of elements in the btree
+ // divided by the maximum number of elements a tree with the current number
+ // of nodes could hold. A value of 1 indicates perfect space
+ // utilization. Smaller values indicate space wastage.
+ // Returns 0 for empty trees.
+ double fullness() const {
+ if (empty()) return 0.0;
return static_cast<double>(size()) / (nodes() * kNodeSlots);
- }
-  // The overhead of the btree structure in bytes per value. Computed as the
-  // total number of bytes used by the btree, minus the bytes used to store
-  // the elements, divided by the number of elements.
- // Returns 0 for empty trees.
- double overhead() const {
- if (empty()) return 0.0;
- return (bytes_used() - size() * sizeof(value_type)) /
- static_cast<double>(size());
- }
-
- // The allocator used by the btree.
+ }
+  // The overhead of the btree structure in bytes per value. Computed as the
+  // total number of bytes used by the btree, minus the bytes used to store
+  // the elements, divided by the number of elements.
+ // Returns 0 for empty trees.
+ double overhead() const {
+ if (empty()) return 0.0;
+ return (bytes_used() - size() * sizeof(value_type)) /
+ static_cast<double>(size());
+ }
+
+ // The allocator used by the btree.
allocator_type get_allocator() const { return allocator(); }
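// Worked illustration of the statistics above (numbers chosen purely for
// example): with kNodeSlots == 8 and thus kMinNodeValues == 4,
// average_bytes_per_value() divides LeafSize() by (8 + 4) / 2.0 == 6
// expected values per node, and a tree holding 12 values across 3 nodes has
// fullness() == 12.0 / (3 * 8) == 0.5.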
-
- private:
- // Internal accessor routines.
- node_type *root() { return root_.template get<2>(); }
- const node_type *root() const { return root_.template get<2>(); }
- node_type *&mutable_root() noexcept { return root_.template get<2>(); }
- key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
-
- // The leftmost node is stored as the parent of the root node.
- node_type *leftmost() { return root()->parent(); }
- const node_type *leftmost() const { return root()->parent(); }
-
- // Allocator routines.
- allocator_type *mutable_allocator() noexcept {
- return &root_.template get<1>();
- }
- const allocator_type &allocator() const noexcept {
- return root_.template get<1>();
- }
-
- // Allocates a correctly aligned node of at least size bytes using the
- // allocator.
- node_type *allocate(const size_type size) {
- return reinterpret_cast<node_type *>(
- absl::container_internal::Allocate<node_type::Alignment()>(
- mutable_allocator(), size));
- }
-
- // Node creation/deletion routines.
+
+ private:
+ // Internal accessor routines.
+ node_type *root() { return root_.template get<2>(); }
+ const node_type *root() const { return root_.template get<2>(); }
+ node_type *&mutable_root() noexcept { return root_.template get<2>(); }
+ key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+
+ // The leftmost node is stored as the parent of the root node.
+ node_type *leftmost() { return root()->parent(); }
+ const node_type *leftmost() const { return root()->parent(); }
+
+ // Allocator routines.
+ allocator_type *mutable_allocator() noexcept {
+ return &root_.template get<1>();
+ }
+ const allocator_type &allocator() const noexcept {
+ return root_.template get<1>();
+ }
+
+ // Allocates a correctly aligned node of at least size bytes using the
+ // allocator.
+ node_type *allocate(const size_type size) {
+ return reinterpret_cast<node_type *>(
+ absl::container_internal::Allocate<node_type::Alignment()>(
+ mutable_allocator(), size));
+ }
+
+ // Node creation/deletion routines.
node_type *new_internal_node(node_type *parent) {
node_type *n = allocate(node_type::InternalSize());
n->init_internal(parent);
return n;
- }
+ }
node_type *new_leaf_node(node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
n->init_leaf(parent, kNodeSlots);
return n;
- }
- node_type *new_leaf_root_node(const int max_count) {
+ }
+ node_type *new_leaf_root_node(const int max_count) {
node_type *n = allocate(node_type::LeafSize(max_count));
n->init_leaf(/*parent=*/n, max_count);
return n;
- }
-
- // Deletion helper routines.
- iterator rebalance_after_delete(iterator iter);
-
- // Rebalances or splits the node iter points to.
- void rebalance_or_split(iterator *iter);
-
-  // Merges the values of left and right, plus the delimiting key on their
-  // parent, onto left, removing the delimiting key and deleting right.
- void merge_nodes(node_type *left, node_type *right);
-
- // Tries to merge node with its left or right sibling, and failing that,
- // rebalance with its left or right sibling. Returns true if a merge
- // occurred, at which point it is no longer valid to access node. Returns
- // false if no merging took place.
- bool try_merge_or_rebalance(iterator *iter);
-
- // Tries to shrink the height of the tree by 1.
- void try_shrink();
-
- iterator internal_end(iterator iter) {
- return iter.node != nullptr ? iter : end();
- }
- const_iterator internal_end(const_iterator iter) const {
- return iter.node != nullptr ? iter : end();
- }
-
- // Emplaces a value into the btree immediately before iter. Requires that
- // key(v) <= iter.key() and (--iter).key() <= key(v).
- template <typename... Args>
- iterator internal_emplace(iterator iter, Args &&... args);
-
- // Returns an iterator pointing to the first value >= the value "iter" is
+ }
+
+ // Deletion helper routines.
+ iterator rebalance_after_delete(iterator iter);
+
+ // Rebalances or splits the node iter points to.
+ void rebalance_or_split(iterator *iter);
+
+  // Merges the values of left and right, plus the delimiting key on their
+  // parent, onto left, removing the delimiting key and deleting right.
+ void merge_nodes(node_type *left, node_type *right);
+
+ // Tries to merge node with its left or right sibling, and failing that,
+ // rebalance with its left or right sibling. Returns true if a merge
+ // occurred, at which point it is no longer valid to access node. Returns
+ // false if no merging took place.
+ bool try_merge_or_rebalance(iterator *iter);
+
+ // Tries to shrink the height of the tree by 1.
+ void try_shrink();
+
+ iterator internal_end(iterator iter) {
+ return iter.node != nullptr ? iter : end();
+ }
+ const_iterator internal_end(const_iterator iter) const {
+ return iter.node != nullptr ? iter : end();
+ }
+
+ // Emplaces a value into the btree immediately before iter. Requires that
+ // key(v) <= iter.key() and (--iter).key() <= key(v).
+ template <typename... Args>
+ iterator internal_emplace(iterator iter, Args &&... args);
+
+ // Returns an iterator pointing to the first value >= the value "iter" is
// pointing at. Note that "iter" might be pointing to an invalid location such
// as iter.position == iter.node->finish(). This routine simply moves iter up
// in the tree to a valid location.
- // Requires: iter.node is non-null.
- template <typename IterType>
- static IterType internal_last(IterType iter);
-
- // Returns an iterator pointing to the leaf position at which key would
+ // Requires: iter.node is non-null.
+ template <typename IterType>
+ static IterType internal_last(IterType iter);
+
+ // Returns an iterator pointing to the leaf position at which key would
// reside in the tree, unless there is an exact match - in which case, the
// result may not be on a leaf. When there's a three-way comparator, we can
// return whether there was an exact match. This allows the caller to avoid a
// subsequent comparison to determine if an exact match was made, which is
// important for keys with expensive comparison, such as strings.
- template <typename K>
- SearchResult<iterator, is_key_compare_to::value> internal_locate(
- const K &key) const;
-
- // Internal routine which implements lower_bound().
- template <typename K>
+ template <typename K>
+ SearchResult<iterator, is_key_compare_to::value> internal_locate(
+ const K &key) const;
+
+ // Internal routine which implements lower_bound().
+ template <typename K>
SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
const K &key) const;
-
- // Internal routine which implements upper_bound().
- template <typename K>
- iterator internal_upper_bound(const K &key) const;
-
- // Internal routine which implements find().
- template <typename K>
- iterator internal_find(const K &key) const;
-
- // Verifies the tree structure of node.
+
+ // Internal routine which implements upper_bound().
+ template <typename K>
+ iterator internal_upper_bound(const K &key) const;
+
+ // Internal routine which implements find().
+ template <typename K>
+ iterator internal_find(const K &key) const;
+
+ // Verifies the tree structure of node.
int internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const;
-
- node_stats internal_stats(const node_type *node) const {
- // The root can be a static empty node.
- if (node == nullptr || (node == root() && empty())) {
- return node_stats(0, 0);
- }
- if (node->leaf()) {
- return node_stats(1, 0);
- }
- node_stats res(0, 1);
+
+ node_stats internal_stats(const node_type *node) const {
+ // The root can be a static empty node.
+ if (node == nullptr || (node == root() && empty())) {
+ return node_stats(0, 0);
+ }
+ if (node->leaf()) {
+ return node_stats(1, 0);
+ }
+ node_stats res(0, 1);
for (int i = node->start(); i <= node->finish(); ++i) {
- res += internal_stats(node->child(i));
- }
- return res;
- }
-
- // We use compressed tuple in order to save space because key_compare and
- // allocator_type are usually empty.
- absl::container_internal::CompressedTuple<key_compare, allocator_type,
- node_type *>
- root_;
-
- // A pointer to the rightmost node. Note that the leftmost node is stored as
- // the root's parent.
- node_type *rightmost_;
-
- // Number of values.
- size_type size_;
-};
-
-////
-// btree_node methods
-template <typename P>
-template <typename... Args>
-inline void btree_node<P>::emplace_value(const size_type i,
- allocator_type *alloc,
- Args &&... args) {
+ res += internal_stats(node->child(i));
+ }
+ return res;
+ }
+
+ // We use compressed tuple in order to save space because key_compare and
+ // allocator_type are usually empty.
+ absl::container_internal::CompressedTuple<key_compare, allocator_type,
+ node_type *>
+ root_;
+
+ // A pointer to the rightmost node. Note that the leftmost node is stored as
+ // the root's parent.
+ node_type *rightmost_;
+
+ // Number of values.
+ size_type size_;
+};
+
+////
+// btree_node methods
+template <typename P>
+template <typename... Args>
+inline void btree_node<P>::emplace_value(const size_type i,
+ allocator_type *alloc,
+ Args &&... args) {
assert(i >= start());
assert(i <= finish());
- // Shift old values to create space for new value and then construct it in
- // place.
+ // Shift old values to create space for new value and then construct it in
+ // place.
if (i < finish()) {
transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
alloc);
- }
- value_init(i, alloc, std::forward<Args>(args)...);
+ }
+ value_init(i, alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
-
+
if (!leaf() && finish() > i + 1) {
for (int j = finish(); j > i + 1; --j) {
- set_child(j, child(j - 1));
- }
- clear_child(i + 1);
- }
-}
-
-template <typename P>
+ set_child(j, child(j - 1));
+ }
+ clear_child(i + 1);
+ }
+}
+
+template <typename P>
inline void btree_node<P>::remove_values(const field_type i,
const field_type to_erase,
allocator_type *alloc) {
@@ -1618,184 +1618,184 @@ inline void btree_node<P>::remove_values(const field_type i,
// Delete all children between begin and end.
for (int j = 0; j < to_erase; ++j) {
clear_and_delete(child(i + j + 1), alloc);
- }
+ }
// Rotate children after end into new positions.
for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
set_child(j - to_erase, child(j));
clear_child(j);
}
- }
+ }
set_finish(orig_finish - to_erase);
-}
-
-template <typename P>
-void btree_node<P>::rebalance_right_to_left(const int to_move,
- btree_node *right,
- allocator_type *alloc) {
- assert(parent() == right->parent());
- assert(position() + 1 == right->position());
- assert(right->count() >= count());
- assert(to_move >= 1);
- assert(to_move <= right->count());
-
- // 1) Move the delimiting value in the parent to the left node.
+}
+
+template <typename P>
+void btree_node<P>::rebalance_right_to_left(const int to_move,
+ btree_node *right,
+ allocator_type *alloc) {
+ assert(parent() == right->parent());
+ assert(position() + 1 == right->position());
+ assert(right->count() >= count());
+ assert(to_move >= 1);
+ assert(to_move <= right->count());
+
+ // 1) Move the delimiting value in the parent to the left node.
transfer(finish(), position(), parent(), alloc);
-
- // 2) Move the (to_move - 1) values from the right node to the left node.
+
+ // 2) Move the (to_move - 1) values from the right node to the left node.
transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
-
- // 3) Move the new delimiting value to the parent from the right node.
+
+ // 3) Move the new delimiting value to the parent from the right node.
parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
-
+
// 4) Shift the values in the right node to their correct positions.
right->transfer_n(right->count() - to_move, right->start(),
right->start() + to_move, right, alloc);
-
- if (!leaf()) {
- // Move the child pointers from the right to the left node.
- for (int i = 0; i < to_move; ++i) {
+
+ if (!leaf()) {
+ // Move the child pointers from the right to the left node.
+ for (int i = 0; i < to_move; ++i) {
init_child(finish() + i + 1, right->child(i));
- }
+ }
for (int i = right->start(); i <= right->finish() - to_move; ++i) {
- assert(i + to_move <= right->max_count());
- right->init_child(i, right->child(i + to_move));
- right->clear_child(i + to_move);
- }
- }
-
+ assert(i + to_move <= right->max_count());
+ right->init_child(i, right->child(i + to_move));
+ right->clear_child(i + to_move);
+ }
+ }
+
// Fixup `finish` on the left and right nodes.
set_finish(finish() + to_move);
right->set_finish(right->finish() - to_move);
-}
-
-template <typename P>
-void btree_node<P>::rebalance_left_to_right(const int to_move,
- btree_node *right,
- allocator_type *alloc) {
- assert(parent() == right->parent());
- assert(position() + 1 == right->position());
- assert(count() >= right->count());
- assert(to_move >= 1);
- assert(to_move <= count());
-
- // Values in the right node are shifted to the right to make room for the
- // new to_move values. Then, the delimiting value in the parent and the
- // other (to_move - 1) values in the left node are moved into the right node.
- // Lastly, a new delimiting value is moved from the left node into the
- // parent, and the remaining empty left node entries are destroyed.
-
+}
+
+template <typename P>
+void btree_node<P>::rebalance_left_to_right(const int to_move,
+ btree_node *right,
+ allocator_type *alloc) {
+ assert(parent() == right->parent());
+ assert(position() + 1 == right->position());
+ assert(count() >= right->count());
+ assert(to_move >= 1);
+ assert(to_move <= count());
+
+ // Values in the right node are shifted to the right to make room for the
+ // new to_move values. Then, the delimiting value in the parent and the
+ // other (to_move - 1) values in the left node are moved into the right node.
+ // Lastly, a new delimiting value is moved from the left node into the
+ // parent, and the remaining empty left node entries are destroyed.
+
// 1) Shift existing values in the right node to their correct positions.
right->transfer_n_backward(right->count(), right->start() + to_move,
right->start(), right, alloc);
-
+
// 2) Move the delimiting value in the parent to the right node.
right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
-
+
// 3) Move the (to_move - 1) values from the left node to the right node.
right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
alloc);
-
- // 4) Move the new delimiting value to the parent from the left node.
+
+ // 4) Move the new delimiting value to the parent from the left node.
parent()->transfer(position(), finish() - to_move, this, alloc);
-
- if (!leaf()) {
- // Move the child pointers from the left to the right node.
+
+ if (!leaf()) {
+ // Move the child pointers from the left to the right node.
for (int i = right->finish(); i >= right->start(); --i) {
- right->init_child(i + to_move, right->child(i));
- right->clear_child(i);
- }
- for (int i = 1; i <= to_move; ++i) {
+ right->init_child(i + to_move, right->child(i));
+ right->clear_child(i);
+ }
+ for (int i = 1; i <= to_move; ++i) {
right->init_child(i - 1, child(finish() - to_move + i));
clear_child(finish() - to_move + i);
- }
- }
-
- // Fixup the counts on the left and right nodes.
+ }
+ }
+
+ // Fixup the counts on the left and right nodes.
set_finish(finish() - to_move);
right->set_finish(right->finish() + to_move);
-}
-
-template <typename P>
-void btree_node<P>::split(const int insert_position, btree_node *dest,
- allocator_type *alloc) {
- assert(dest->count() == 0);
+}
+
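// Illustration of rebalance_left_to_right with to_move == 2 (keys only,
// following the numbered steps below):
//
//   before: left = [a b c d]   parent delimiter = e   right = [f g]
//   1) shift right's values:                          right = [. . f g]
//   2) delimiter moves down:                          right = [. e f g]
//   3) left's tail moves over:                        right = [d e f g]
//   4) new delimiter moves up: parent delimiter = c,  left  = [a b]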
+template <typename P>
+void btree_node<P>::split(const int insert_position, btree_node *dest,
+ allocator_type *alloc) {
+ assert(dest->count() == 0);
assert(max_count() == kNodeSlots);
-
- // We bias the split based on the position being inserted. If we're
- // inserting at the beginning of the left node then bias the split to put
- // more values on the right node. If we're inserting at the end of the
- // right node then bias the split to put more values on the left node.
+
+ // We bias the split based on the position being inserted. If we're
+ // inserting at the beginning of the left node then bias the split to put
+ // more values on the right node. If we're inserting at the end of the
+ // right node then bias the split to put more values on the left node.
if (insert_position == start()) {
dest->set_finish(dest->start() + finish() - 1);
} else if (insert_position == kNodeSlots) {
dest->set_finish(dest->start());
- } else {
+ } else {
dest->set_finish(dest->start() + count() / 2);
- }
+ }
set_finish(finish() - dest->count());
- assert(count() >= 1);
-
- // Move values from the left sibling to the right sibling.
+ assert(count() >= 1);
+
+ // Move values from the left sibling to the right sibling.
dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
-
- // The split key is the largest value in the left sibling.
+
+ // The split key is the largest value in the left sibling.
--mutable_finish();
parent()->emplace_value(position(), alloc, finish_slot());
value_destroy(finish(), alloc);
- parent()->init_child(position() + 1, dest);
-
- if (!leaf()) {
+ parent()->init_child(position() + 1, dest);
+
+ if (!leaf()) {
for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
++i, ++j) {
assert(child(j) != nullptr);
dest->init_child(i, child(j));
clear_child(j);
- }
- }
-}
-
-template <typename P>
-void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
- assert(parent() == src->parent());
- assert(position() + 1 == src->position());
-
- // Move the delimiting value to the left node.
+ }
+ }
+}
+
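// Illustration of the insertion-position bias below: for an ascending
// insertion pattern, insert_position == kNodeSlots takes the second branch,
// so `dest` starts empty and only the promoted split key leaves the full
// left node. Sequential inserts therefore leave behind nearly-full nodes
// instead of the half-full nodes an unbiased split would produce.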
+template <typename P>
+void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
+ assert(parent() == src->parent());
+ assert(position() + 1 == src->position());
+
+ // Move the delimiting value to the left node.
value_init(finish(), alloc, parent()->slot(position()));
-
- // Move the values from the right to the left node.
+
+ // Move the values from the right to the left node.
transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
-
- if (!leaf()) {
- // Move the child pointers from the right to the left node.
+
+ if (!leaf()) {
+ // Move the child pointers from the right to the left node.
for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
init_child(j, src->child(i));
- src->clear_child(i);
- }
- }
-
+ src->clear_child(i);
+ }
+ }
+
// Fixup `finish` on the src and dest nodes.
set_finish(start() + 1 + count() + src->count());
src->set_finish(src->start());
-
+
// Remove the value on the parent node and delete the src node.
parent()->remove_values(position(), /*to_erase=*/1, alloc);
-}
-
-template <typename P>
+}
+
+template <typename P>
void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
if (node->leaf()) {
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(LeafSize(node->max_count()), node, alloc);
return;
- }
+ }
if (node->count() == 0) {
deallocate(InternalSize(), node, alloc);
return;
- }
-
+ }
+
// The parent of the root of the subtree we are deleting.
btree_node *delete_root_parent = node->parent();
-
+
// Navigate to the leftmost leaf under node, and then delete upwards.
while (!node->leaf()) node = node->start_child();
// Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
@@ -1829,114 +1829,114 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
if (parent == delete_root_parent) return;
++pos;
} while (pos > parent->finish());
- }
-}
-
-////
-// btree_iterator methods
-template <typename N, typename R, typename P>
-void btree_iterator<N, R, P>::increment_slow() {
- if (node->leaf()) {
+ }
+}
+
+////
+// btree_iterator methods
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::increment_slow() {
+ if (node->leaf()) {
assert(position >= node->finish());
- btree_iterator save(*this);
+ btree_iterator save(*this);
while (position == node->finish() && !node->is_root()) {
- assert(node->parent()->child(node->position()) == node);
- position = node->position();
- node = node->parent();
- }
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position();
+ node = node->parent();
+ }
// TODO(ezb): assert we aren't incrementing end() instead of handling.
if (position == node->finish()) {
- *this = save;
- }
- } else {
+ *this = save;
+ }
+ } else {
assert(position < node->finish());
- node = node->child(position + 1);
- while (!node->leaf()) {
+ node = node->child(position + 1);
+ while (!node->leaf()) {
node = node->start_child();
- }
+ }
position = node->start();
- }
-}
-
-template <typename N, typename R, typename P>
-void btree_iterator<N, R, P>::decrement_slow() {
- if (node->leaf()) {
- assert(position <= -1);
- btree_iterator save(*this);
+ }
+}
+
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::decrement_slow() {
+ if (node->leaf()) {
+ assert(position <= -1);
+ btree_iterator save(*this);
while (position < node->start() && !node->is_root()) {
- assert(node->parent()->child(node->position()) == node);
- position = node->position() - 1;
- node = node->parent();
- }
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position() - 1;
+ node = node->parent();
+ }
// TODO(ezb): assert we aren't decrementing begin() instead of handling.
if (position < node->start()) {
- *this = save;
- }
- } else {
+ *this = save;
+ }
+ } else {
assert(position >= node->start());
- node = node->child(position);
- while (!node->leaf()) {
+ node = node->child(position);
+ while (!node->leaf()) {
node = node->child(node->finish());
- }
+ }
position = node->finish() - 1;
- }
-}
-
-////
-// btree methods
-template <typename P>
-template <typename Btree>
+ }
+}
+
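// Cost note: increment()/decrement() are O(1) while they stay inside one
// leaf; the slow paths above climb at most the height of the tree. A full
// traversal crosses each parent/child edge at most twice, so iterating over
// all n values costs O(n) overall.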
+////
+// btree methods
+template <typename P>
+template <typename Btree>
void btree<P>::copy_or_move_values_in_order(Btree &other) {
- static_assert(std::is_same<btree, Btree>::value ||
- std::is_same<const btree, Btree>::value,
- "Btree type must be same or const.");
- assert(empty());
-
- // We can avoid key comparisons because we know the order of the
- // values is the same order we'll store them in.
+ static_assert(std::is_same<btree, Btree>::value ||
+ std::is_same<const btree, Btree>::value,
+ "Btree type must be same or const.");
+ assert(empty());
+
+ // We can avoid key comparisons because we know the order of the
+ // values is the same order we'll store them in.
auto iter = other.begin();
if (iter == other.end()) return;
- insert_multi(maybe_move_from_iterator(iter));
- ++iter;
+ insert_multi(maybe_move_from_iterator(iter));
+ ++iter;
for (; iter != other.end(); ++iter) {
- // If the btree is not empty, we can just insert the new value at the end
- // of the tree.
- internal_emplace(end(), maybe_move_from_iterator(iter));
- }
-}
-
-template <typename P>
-constexpr bool btree<P>::static_assert_validation() {
- static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
- "Key comparison must be nothrow copy constructible");
- static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
- "Allocator must be nothrow copy constructible");
- static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
- "iterator not trivially copyable.");
-
-  // Note: We assert that kNodeSlots, which is computed from
-  // Params::kTargetNodeSize, must fit in node_type::field_type.
- static_assert(
+ // If the btree is not empty, we can just insert the new value at the end
+ // of the tree.
+ internal_emplace(end(), maybe_move_from_iterator(iter));
+ }
+}
+
+template <typename P>
+constexpr bool btree<P>::static_assert_validation() {
+ static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
+ "Key comparison must be nothrow copy constructible");
+ static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
+ "Allocator must be nothrow copy constructible");
+ static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+ "iterator not trivially copyable.");
+
+  // Note: We assert that kNodeSlots, which is computed from
+  // Params::kTargetNodeSize, must fit in node_type::field_type.
+ static_assert(
kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
- "target node size too large");
-
- // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
- using compare_result_type =
- absl::result_of_t<key_compare(key_type, key_type)>;
- static_assert(
- std::is_same<compare_result_type, bool>::value ||
- std::is_convertible<compare_result_type, absl::weak_ordering>::value,
- "key comparison function must return absl::{weak,strong}_ordering or "
- "bool.");
-
- // Test the assumption made in setting kNodeValueSpace.
- static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
- "node space assumption incorrect");
-
- return true;
-}
-
-template <typename P>
+ "target node size too large");
+
+ // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
+ using compare_result_type =
+ absl::result_of_t<key_compare(key_type, key_type)>;
+ static_assert(
+ std::is_same<compare_result_type, bool>::value ||
+ std::is_convertible<compare_result_type, absl::weak_ordering>::value,
+ "key comparison function must return absl::{weak,strong}_ordering or "
+ "bool.");
+
+ // Test the assumption made in setting kNodeValueSpace.
+ static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
+ "node space assumption incorrect");
+
+ return true;
+}
+
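A minimal sketch of what the comparator check above permits: besides ordinary bool-returning comparators, btree accepts three-way comparators whose result converts to absl::weak_ordering. The ThreeWayLess functor here is hypothetical example code, not part of Abseil.

#include <string>
#include "absl/container/btree_set.h"
#include "absl/types/compare.h"

// A three-way ("compare-to") comparator; static_assert_validation() accepts
// it because its result type converts to absl::weak_ordering.
struct ThreeWayLess {
  absl::weak_ordering operator()(const std::string &a,
                                 const std::string &b) const {
    return a < b   ? absl::weak_ordering::less
           : b < a ? absl::weak_ordering::greater
                   : absl::weak_ordering::equivalent;
  }
};

int main() {
  absl::btree_set<std::string, ThreeWayLess> set = {"b", "a", "c"};
  return *set.begin() == "a" ? 0 : 1;
}
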
+template <typename P>
template <typename K>
auto btree<P>::lower_bound_equal(const K &key) const
-> std::pair<iterator, bool> {
@@ -1947,9 +1947,9 @@ auto btree<P>::lower_bound_equal(const K &key) const
? res.IsEq()
: lower != end() && !compare_keys(key, lower.key());
return {lower, equal};
-}
-
-template <typename P>
+}
+
+template <typename P>
template <typename K>
auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
@@ -1981,63 +1981,63 @@ auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
template <typename P>
template <typename K, typename... Args>
auto btree<P>::insert_unique(const K &key, Args &&... args)
- -> std::pair<iterator, bool> {
- if (empty()) {
- mutable_root() = rightmost_ = new_leaf_root_node(1);
- }
-
+ -> std::pair<iterator, bool> {
+ if (empty()) {
+ mutable_root() = rightmost_ = new_leaf_root_node(1);
+ }
+
SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
iterator iter = res.value;
-
- if (res.HasMatch()) {
- if (res.IsEq()) {
- // The key already exists in the tree, do nothing.
- return {iter, false};
- }
- } else {
- iterator last = internal_last(iter);
- if (last.node && !compare_keys(key, last.key())) {
- // The key already exists in the tree, do nothing.
- return {last, false};
- }
- }
- return {internal_emplace(iter, std::forward<Args>(args)...), true};
-}
-
-template <typename P>
+
+ if (res.HasMatch()) {
+ if (res.IsEq()) {
+ // The key already exists in the tree, do nothing.
+ return {iter, false};
+ }
+ } else {
+ iterator last = internal_last(iter);
+ if (last.node && !compare_keys(key, last.key())) {
+ // The key already exists in the tree, do nothing.
+ return {last, false};
+ }
+ }
+ return {internal_emplace(iter, std::forward<Args>(args)...), true};
+}
+
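At the public API level, insert_unique() is what gives map/set insert() its pair<iterator, bool> result. A small sketch:

#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<std::string, int> m;
  // The bool reports whether the key was newly inserted or already present;
  // an insert of an existing key leaves the stored value untouched.
  auto first = m.insert({"answer", 42});  // first.second == true
  auto again = m.insert({"answer", 7});   // again.second == false, value kept
  return (first.second && !again.second && m["answer"] == 42) ? 0 : 1;
}
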
+template <typename P>
template <typename K, typename... Args>
inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
- Args &&... args)
- -> std::pair<iterator, bool> {
- if (!empty()) {
- if (position == end() || compare_keys(key, position.key())) {
+ Args &&... args)
+ -> std::pair<iterator, bool> {
+ if (!empty()) {
+ if (position == end() || compare_keys(key, position.key())) {
if (position == begin() || compare_keys(std::prev(position).key(), key)) {
- // prev.key() < key < position.key()
- return {internal_emplace(position, std::forward<Args>(args)...), true};
- }
- } else if (compare_keys(position.key(), key)) {
- ++position;
- if (position == end() || compare_keys(key, position.key())) {
- // {original `position`}.key() < key < {current `position`}.key()
- return {internal_emplace(position, std::forward<Args>(args)...), true};
- }
- } else {
- // position.key() == key
- return {position, false};
- }
- }
- return insert_unique(key, std::forward<Args>(args)...);
-}
-
-template <typename P>
+ // prev.key() < key < position.key()
+ return {internal_emplace(position, std::forward<Args>(args)...), true};
+ }
+ } else if (compare_keys(position.key(), key)) {
+ ++position;
+ if (position == end() || compare_keys(key, position.key())) {
+ // {original `position`}.key() < key < {current `position`}.key()
+ return {internal_emplace(position, std::forward<Args>(args)...), true};
+ }
+ } else {
+ // position.key() == key
+ return {position, false};
+ }
+ }
+ return insert_unique(key, std::forward<Args>(args)...);
+}
+
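A sketch of the hint semantics implemented above: a hint adjacent to the insertion point is used directly, while a wrong hint simply falls back to insert_unique().

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> set = {10, 30};
  auto hint = set.find(30);
  set.insert(hint, 20);         // 10 < 20 < 30: the hint is used
  set.insert(set.begin(), 40);  // wrong hint: falls back to a normal insert
  return set.size() == 4 ? 0 : 1;
}
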
+template <typename P>
template <typename InputIterator, typename>
void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
- for (; b != e; ++b) {
- insert_hint_unique(end(), params_type::key(*b), *b);
- }
-}
-
-template <typename P>
+ for (; b != e; ++b) {
+ insert_hint_unique(end(), params_type::key(*b), *b);
+ }
+}
+
+template <typename P>
template <typename InputIterator>
void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
for (; b != e; ++b) {
@@ -2047,464 +2047,464 @@ void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
}
template <typename P>
-template <typename ValueType>
-auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
- if (empty()) {
- mutable_root() = rightmost_ = new_leaf_root_node(1);
- }
-
- iterator iter = internal_upper_bound(key);
- if (iter.node == nullptr) {
- iter = end();
- }
- return internal_emplace(iter, std::forward<ValueType>(v));
-}
-
-template <typename P>
-template <typename ValueType>
-auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
- if (!empty()) {
- const key_type &key = params_type::key(v);
- if (position == end() || !compare_keys(position.key(), key)) {
+template <typename ValueType>
+auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
+ if (empty()) {
+ mutable_root() = rightmost_ = new_leaf_root_node(1);
+ }
+
+ iterator iter = internal_upper_bound(key);
+ if (iter.node == nullptr) {
+ iter = end();
+ }
+ return internal_emplace(iter, std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
+ if (!empty()) {
+ const key_type &key = params_type::key(v);
+ if (position == end() || !compare_keys(position.key(), key)) {
if (position == begin() ||
!compare_keys(key, std::prev(position).key())) {
- // prev.key() <= key <= position.key()
- return internal_emplace(position, std::forward<ValueType>(v));
- }
- } else {
+ // prev.key() <= key <= position.key()
+ return internal_emplace(position, std::forward<ValueType>(v));
+ }
+ } else {
++position;
if (position == end() || !compare_keys(position.key(), key)) {
        // {original `position`}.key() < key <= {current `position`}.key()
return internal_emplace(position, std::forward<ValueType>(v));
- }
- }
- }
- return insert_multi(std::forward<ValueType>(v));
-}
-
-template <typename P>
-template <typename InputIterator>
-void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
- for (; b != e; ++b) {
- insert_hint_multi(end(), *b);
- }
-}
-
-template <typename P>
+ }
+ }
+ }
+ return insert_multi(std::forward<ValueType>(v));
+}
+
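For the multi variant, equal keys are allowed, so the hinted checks use non-strict bounds. A small multiset sketch:

#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> ms = {1, 2, 2, 3};
  // insert_hint_multi() accepts equal keys, so a hint pointing at an
  // existing 2 lets another 2 be placed next to it.
  ms.insert(ms.find(2), 2);
  return ms.count(2) == 3 ? 0 : 1;
}
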
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
+ for (; b != e; ++b) {
+ insert_hint_multi(end(), *b);
+ }
+}
+
+template <typename P>
auto btree<P>::operator=(const btree &other) -> btree & {
if (this != &other) {
- clear();
-
+ clear();
+
*mutable_key_comp() = other.key_comp();
- if (absl::allocator_traits<
- allocator_type>::propagate_on_container_copy_assignment::value) {
+ if (absl::allocator_traits<
+ allocator_type>::propagate_on_container_copy_assignment::value) {
*mutable_allocator() = other.allocator();
- }
-
+ }
+
copy_or_move_values_in_order(other);
- }
- return *this;
-}
-
-template <typename P>
+ }
+ return *this;
+}
+
+template <typename P>
auto btree<P>::operator=(btree &&other) noexcept -> btree & {
if (this != &other) {
- clear();
-
- using std::swap;
- if (absl::allocator_traits<
-            allocator_type>::propagate_on_container_move_assignment::value) {
- // Note: `root_` also contains the allocator and the key comparator.
+ clear();
+
+ using std::swap;
+ if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_move_assignment::value) {
+ // Note: `root_` also contains the allocator and the key comparator.
swap(root_, other.root_);
swap(rightmost_, other.rightmost_);
swap(size_, other.size_);
- } else {
+ } else {
if (allocator() == other.allocator()) {
swap(mutable_root(), other.mutable_root());
swap(*mutable_key_comp(), *other.mutable_key_comp());
swap(rightmost_, other.rightmost_);
swap(size_, other.size_);
- } else {
- // We aren't allowed to propagate the allocator and the allocator is
- // different so we can't take over its memory. We must move each element
+ } else {
+ // We aren't allowed to propagate the allocator and the allocator is
+ // different so we can't take over its memory. We must move each element
        // individually. We need both `other` and `this` to have `other`'s key
// comparator while moving the values so we can't swap the key
// comparators.
*mutable_key_comp() = other.key_comp();
copy_or_move_values_in_order(other);
- }
- }
- }
- return *this;
-}
-
-template <typename P>
-auto btree<P>::erase(iterator iter) -> iterator {
- bool internal_delete = false;
- if (!iter.node->leaf()) {
- // Deletion of a value on an internal node. First, move the largest value
+ }
+ }
+ }
+ return *this;
+}
+
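A sketch of the equal-allocator branch above, using default allocators, so the tree representations are swapped rather than moved element by element. Note that `src` ending up empty reflects this particular implementation (clear() followed by a swap), not a general guarantee for moved-from containers.

#include <utility>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> src = {1, 2, 3};
  absl::btree_set<int> dst = {9};
  // With equal allocators, move assignment clears `dst` and swaps the
  // representations: no per-element moves occur.
  dst = std::move(src);
  return (dst.size() == 3 && src.empty()) ? 0 : 1;
}
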
+template <typename P>
+auto btree<P>::erase(iterator iter) -> iterator {
+ bool internal_delete = false;
+ if (!iter.node->leaf()) {
+ // Deletion of a value on an internal node. First, move the largest value
// from our left child here, then delete that position (in remove_values()
- // below). We can get to the largest value from our left child by
- // decrementing iter.
- iterator internal_iter(iter);
- --iter;
- assert(iter.node->leaf());
- params_type::move(mutable_allocator(), iter.node->slot(iter.position),
- internal_iter.node->slot(internal_iter.position));
- internal_delete = true;
- }
-
- // Delete the key from the leaf.
+ // below). We can get to the largest value from our left child by
+ // decrementing iter.
+ iterator internal_iter(iter);
+ --iter;
+ assert(iter.node->leaf());
+ params_type::move(mutable_allocator(), iter.node->slot(iter.position),
+ internal_iter.node->slot(internal_iter.position));
+ internal_delete = true;
+ }
+
+ // Delete the key from the leaf.
iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
- --size_;
-
- // We want to return the next value after the one we just erased. If we
- // erased from an internal node (internal_delete == true), then the next
- // value is ++(++iter). If we erased from a leaf node (internal_delete ==
- // false) then the next value is ++iter. Note that ++iter may point to an
- // internal node and the value in the internal node may move to a leaf node
- // (iter.node) when rebalancing is performed at the leaf level.
-
- iterator res = rebalance_after_delete(iter);
-
- // If we erased from an internal node, advance the iterator.
- if (internal_delete) {
- ++res;
- }
- return res;
-}
-
-template <typename P>
-auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
- // Merge/rebalance as we walk back up the tree.
- iterator res(iter);
- bool first_iteration = true;
- for (;;) {
- if (iter.node == root()) {
- try_shrink();
- if (empty()) {
- return end();
- }
- break;
- }
- if (iter.node->count() >= kMinNodeValues) {
- break;
- }
- bool merged = try_merge_or_rebalance(&iter);
- // On the first iteration, we should update `res` with `iter` because `res`
- // may have been invalidated.
- if (first_iteration) {
- res = iter;
- first_iteration = false;
- }
- if (!merged) {
- break;
- }
- iter.position = iter.node->position();
- iter.node = iter.node->parent();
- }
-
- // Adjust our return value. If we're pointing at the end of a node, advance
- // the iterator.
+ --size_;
+
+ // We want to return the next value after the one we just erased. If we
+ // erased from an internal node (internal_delete == true), then the next
+ // value is ++(++iter). If we erased from a leaf node (internal_delete ==
+ // false) then the next value is ++iter. Note that ++iter may point to an
+ // internal node and the value in the internal node may move to a leaf node
+ // (iter.node) when rebalancing is performed at the leaf level.
+
+ iterator res = rebalance_after_delete(iter);
+
+ // If we erased from an internal node, advance the iterator.
+ if (internal_delete) {
+ ++res;
+ }
+ return res;
+}
+
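The returned iterator supports the usual erase-while-iterating idiom at the public API level:

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}, {4, 4}};
  // erase() returns the iterator following the removed entry. Because btree
  // erase can rebalance nodes, iterators other than the returned one must be
  // treated as invalidated.
  for (auto it = m.begin(); it != m.end();) {
    if (it->first % 2 == 0) {
      it = m.erase(it);
    } else {
      ++it;
    }
  }
  return m.size() == 2 ? 0 : 1;
}
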
+template <typename P>
+auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
+ // Merge/rebalance as we walk back up the tree.
+ iterator res(iter);
+ bool first_iteration = true;
+ for (;;) {
+ if (iter.node == root()) {
+ try_shrink();
+ if (empty()) {
+ return end();
+ }
+ break;
+ }
+ if (iter.node->count() >= kMinNodeValues) {
+ break;
+ }
+ bool merged = try_merge_or_rebalance(&iter);
+ // On the first iteration, we should update `res` with `iter` because `res`
+ // may have been invalidated.
+ if (first_iteration) {
+ res = iter;
+ first_iteration = false;
+ }
+ if (!merged) {
+ break;
+ }
+ iter.position = iter.node->position();
+ iter.node = iter.node->parent();
+ }
+
+ // Adjust our return value. If we're pointing at the end of a node, advance
+ // the iterator.
if (res.position == res.node->finish()) {
res.position = res.node->finish() - 1;
- ++res;
- }
-
- return res;
-}
-
-template <typename P>
+ ++res;
+ }
+
+ return res;
+}
+
+template <typename P>
auto btree<P>::erase_range(iterator begin, iterator end)
- -> std::pair<size_type, iterator> {
- difference_type count = std::distance(begin, end);
- assert(count >= 0);
-
- if (count == 0) {
- return {0, begin};
- }
-
- if (count == size_) {
- clear();
- return {count, this->end()};
- }
-
- if (begin.node == end.node) {
+ -> std::pair<size_type, iterator> {
+ difference_type count = std::distance(begin, end);
+ assert(count >= 0);
+
+ if (count == 0) {
+ return {0, begin};
+ }
+
+ if (count == size_) {
+ clear();
+ return {count, this->end()};
+ }
+
+ if (begin.node == end.node) {
assert(end.position > begin.position);
begin.node->remove_values(begin.position, end.position - begin.position,
mutable_allocator());
- size_ -= count;
- return {count, rebalance_after_delete(begin)};
- }
-
- const size_type target_size = size_ - count;
- while (size_ > target_size) {
- if (begin.node->leaf()) {
- const size_type remaining_to_erase = size_ - target_size;
+ size_ -= count;
+ return {count, rebalance_after_delete(begin)};
+ }
+
+ const size_type target_size = size_ - count;
+ while (size_ > target_size) {
+ if (begin.node->leaf()) {
+ const size_type remaining_to_erase = size_ - target_size;
const size_type remaining_in_node = begin.node->finish() - begin.position;
const size_type to_erase =
(std::min)(remaining_to_erase, remaining_in_node);
begin.node->remove_values(begin.position, to_erase, mutable_allocator());
size_ -= to_erase;
begin = rebalance_after_delete(begin);
- } else {
- begin = erase(begin);
- }
- }
- return {count, begin};
-}
-
-template <typename P>
-void btree<P>::clear() {
- if (!empty()) {
+ } else {
+ begin = erase(begin);
+ }
+ }
+ return {count, begin};
+}
+
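erase_range() is what backs the public range erase; a sketch exercising the bulk leaf-span path:

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> set;
  for (int i = 0; i < 100; ++i) set.insert(i);
  // erase(first, last) forwards to erase_range(), which removes whole leaf
  // spans at a time instead of erasing one element per call.
  set.erase(set.lower_bound(10), set.lower_bound(90));
  return set.size() == 20 ? 0 : 1;
}
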
+template <typename P>
+void btree<P>::clear() {
+ if (!empty()) {
node_type::clear_and_delete(root(), mutable_allocator());
- }
- mutable_root() = EmptyNode();
- rightmost_ = EmptyNode();
- size_ = 0;
-}
-
-template <typename P>
+ }
+ mutable_root() = EmptyNode();
+ rightmost_ = EmptyNode();
+ size_ = 0;
+}
+
+template <typename P>
void btree<P>::swap(btree &other) {
- using std::swap;
- if (absl::allocator_traits<
- allocator_type>::propagate_on_container_swap::value) {
- // Note: `root_` also contains the allocator and the key comparator.
+ using std::swap;
+ if (absl::allocator_traits<
+ allocator_type>::propagate_on_container_swap::value) {
+ // Note: `root_` also contains the allocator and the key comparator.
swap(root_, other.root_);
- } else {
- // It's undefined behavior if the allocators are unequal here.
+ } else {
+ // It's undefined behavior if the allocators are unequal here.
assert(allocator() == other.allocator());
swap(mutable_root(), other.mutable_root());
swap(*mutable_key_comp(), *other.mutable_key_comp());
- }
+ }
swap(rightmost_, other.rightmost_);
swap(size_, other.size_);
-}
-
-template <typename P>
-void btree<P>::verify() const {
- assert(root() != nullptr);
- assert(leftmost() != nullptr);
- assert(rightmost_ != nullptr);
- assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
- assert(leftmost() == (++const_iterator(root(), -1)).node);
+}
+
+template <typename P>
+void btree<P>::verify() const {
+ assert(root() != nullptr);
+ assert(leftmost() != nullptr);
+ assert(rightmost_ != nullptr);
+ assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
+ assert(leftmost() == (++const_iterator(root(), -1)).node);
assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
- assert(leftmost()->leaf());
- assert(rightmost_->leaf());
-}
-
-template <typename P>
-void btree<P>::rebalance_or_split(iterator *iter) {
- node_type *&node = iter->node;
- int &insert_position = iter->position;
- assert(node->count() == node->max_count());
+ assert(leftmost()->leaf());
+ assert(rightmost_->leaf());
+}
+
+template <typename P>
+void btree<P>::rebalance_or_split(iterator *iter) {
+ node_type *&node = iter->node;
+ int &insert_position = iter->position;
+ assert(node->count() == node->max_count());
assert(kNodeSlots == node->max_count());
-
- // First try to make room on the node by rebalancing.
- node_type *parent = node->parent();
- if (node != root()) {
+
+ // First try to make room on the node by rebalancing.
+ node_type *parent = node->parent();
+ if (node != root()) {
if (node->position() > parent->start()) {
- // Try rebalancing with our left sibling.
- node_type *left = parent->child(node->position() - 1);
+ // Try rebalancing with our left sibling.
+ node_type *left = parent->child(node->position() - 1);
assert(left->max_count() == kNodeSlots);
if (left->count() < kNodeSlots) {
- // We bias rebalancing based on the position being inserted. If we're
- // inserting at the end of the right node then we bias rebalancing to
- // fill up the left node.
+ // We bias rebalancing based on the position being inserted. If we're
+ // inserting at the end of the right node then we bias rebalancing to
+ // fill up the left node.
int to_move = (kNodeSlots - left->count()) /
(1 + (insert_position < static_cast<int>(kNodeSlots)));
- to_move = (std::max)(1, to_move);
-
+ to_move = (std::max)(1, to_move);
+
if (insert_position - to_move >= node->start() ||
left->count() + to_move < static_cast<int>(kNodeSlots)) {
- left->rebalance_right_to_left(to_move, node, mutable_allocator());
-
- assert(node->max_count() - node->count() == to_move);
- insert_position = insert_position - to_move;
+ left->rebalance_right_to_left(to_move, node, mutable_allocator());
+
+ assert(node->max_count() - node->count() == to_move);
+ insert_position = insert_position - to_move;
if (insert_position < node->start()) {
- insert_position = insert_position + left->count() + 1;
- node = left;
- }
-
- assert(node->count() < node->max_count());
- return;
- }
- }
- }
-
+ insert_position = insert_position + left->count() + 1;
+ node = left;
+ }
+
+ assert(node->count() < node->max_count());
+ return;
+ }
+ }
+ }
+
if (node->position() < parent->finish()) {
- // Try rebalancing with our right sibling.
- node_type *right = parent->child(node->position() + 1);
+ // Try rebalancing with our right sibling.
+ node_type *right = parent->child(node->position() + 1);
assert(right->max_count() == kNodeSlots);
if (right->count() < kNodeSlots) {
- // We bias rebalancing based on the position being inserted. If we're
- // inserting at the beginning of the left node then we bias rebalancing
- // to fill up the right node.
+ // We bias rebalancing based on the position being inserted. If we're
+ // inserting at the beginning of the left node then we bias rebalancing
+ // to fill up the right node.
int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
(1 + (insert_position > node->start()));
- to_move = (std::max)(1, to_move);
-
+ to_move = (std::max)(1, to_move);
+
if (insert_position <= node->finish() - to_move ||
right->count() + to_move < static_cast<int>(kNodeSlots)) {
- node->rebalance_left_to_right(to_move, right, mutable_allocator());
-
+ node->rebalance_left_to_right(to_move, right, mutable_allocator());
+
if (insert_position > node->finish()) {
- insert_position = insert_position - node->count() - 1;
- node = right;
- }
-
- assert(node->count() < node->max_count());
- return;
- }
- }
- }
-
- // Rebalancing failed, make sure there is room on the parent node for a new
- // value.
+ insert_position = insert_position - node->count() - 1;
+ node = right;
+ }
+
+ assert(node->count() < node->max_count());
+ return;
+ }
+ }
+ }
+
+ // Rebalancing failed, make sure there is room on the parent node for a new
+ // value.
assert(parent->max_count() == kNodeSlots);
if (parent->count() == kNodeSlots) {
- iterator parent_iter(node->parent(), node->position());
- rebalance_or_split(&parent_iter);
- }
- } else {
- // Rebalancing not possible because this is the root node.
- // Create a new root node and set the current root node as the child of the
- // new root.
- parent = new_internal_node(parent);
+ iterator parent_iter(node->parent(), node->position());
+ rebalance_or_split(&parent_iter);
+ }
+ } else {
+ // Rebalancing not possible because this is the root node.
+ // Create a new root node and set the current root node as the child of the
+ // new root.
+ parent = new_internal_node(parent);
parent->init_child(parent->start(), root());
- mutable_root() = parent;
- // If the former root was a leaf node, then it's now the rightmost node.
+ mutable_root() = parent;
+ // If the former root was a leaf node, then it's now the rightmost node.
assert(!parent->start_child()->leaf() ||
parent->start_child() == rightmost_);
- }
-
- // Split the node.
- node_type *split_node;
- if (node->leaf()) {
- split_node = new_leaf_node(parent);
- node->split(insert_position, split_node, mutable_allocator());
- if (rightmost_ == node) rightmost_ = split_node;
- } else {
- split_node = new_internal_node(parent);
- node->split(insert_position, split_node, mutable_allocator());
- }
-
+ }
+
+ // Split the node.
+ node_type *split_node;
+ if (node->leaf()) {
+ split_node = new_leaf_node(parent);
+ node->split(insert_position, split_node, mutable_allocator());
+ if (rightmost_ == node) rightmost_ = split_node;
+ } else {
+ split_node = new_internal_node(parent);
+ node->split(insert_position, split_node, mutable_allocator());
+ }
+
if (insert_position > node->finish()) {
- insert_position = insert_position - node->count() - 1;
- node = split_node;
- }
-}
-
-template <typename P>
-void btree<P>::merge_nodes(node_type *left, node_type *right) {
- left->merge(right, mutable_allocator());
+ insert_position = insert_position - node->count() - 1;
+ node = split_node;
+ }
+}
+
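The `to_move` bias above can be illustrated with concrete numbers. kNodeSlots = 6 and the counts below are assumed values for the example, not the real computed constants:

#include <algorithm>
#include <cstdio>

int main() {
  const int kNodeSlots = 6;
  const int left_count = 2;  // values currently in the left sibling
  // Inserting at the right node's end (insert_position == kNodeSlots) makes
  // the divisor 1, moving more values left; otherwise the divisor is 2.
  for (int insert_position : {3, kNodeSlots}) {
    int to_move =
        (kNodeSlots - left_count) / (1 + (insert_position < kNodeSlots));
    to_move = std::max(1, to_move);
    std::printf("insert_position=%d -> to_move=%d\n", insert_position,
                to_move);  // prints 2, then 4
  }
}
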
+template <typename P>
+void btree<P>::merge_nodes(node_type *left, node_type *right) {
+ left->merge(right, mutable_allocator());
if (rightmost_ == right) rightmost_ = left;
-}
-
-template <typename P>
-bool btree<P>::try_merge_or_rebalance(iterator *iter) {
- node_type *parent = iter->node->parent();
+}
+
+template <typename P>
+bool btree<P>::try_merge_or_rebalance(iterator *iter) {
+ node_type *parent = iter->node->parent();
if (iter->node->position() > parent->start()) {
- // Try merging with our left sibling.
- node_type *left = parent->child(iter->node->position() - 1);
+ // Try merging with our left sibling.
+ node_type *left = parent->child(iter->node->position() - 1);
assert(left->max_count() == kNodeSlots);
if (1U + left->count() + iter->node->count() <= kNodeSlots) {
- iter->position += 1 + left->count();
- merge_nodes(left, iter->node);
- iter->node = left;
- return true;
- }
- }
+ iter->position += 1 + left->count();
+ merge_nodes(left, iter->node);
+ iter->node = left;
+ return true;
+ }
+ }
if (iter->node->position() < parent->finish()) {
- // Try merging with our right sibling.
- node_type *right = parent->child(iter->node->position() + 1);
+ // Try merging with our right sibling.
+ node_type *right = parent->child(iter->node->position() + 1);
assert(right->max_count() == kNodeSlots);
if (1U + iter->node->count() + right->count() <= kNodeSlots) {
- merge_nodes(iter->node, right);
- return true;
- }
- // Try rebalancing with our right sibling. We don't perform rebalancing if
- // we deleted the first element from iter->node and the node is not
- // empty. This is a small optimization for the common pattern of deleting
- // from the front of the tree.
+ merge_nodes(iter->node, right);
+ return true;
+ }
+ // Try rebalancing with our right sibling. We don't perform rebalancing if
+ // we deleted the first element from iter->node and the node is not
+ // empty. This is a small optimization for the common pattern of deleting
+ // from the front of the tree.
if (right->count() > kMinNodeValues &&
(iter->node->count() == 0 || iter->position > iter->node->start())) {
- int to_move = (right->count() - iter->node->count()) / 2;
- to_move = (std::min)(to_move, right->count() - 1);
- iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
- return false;
- }
- }
+ int to_move = (right->count() - iter->node->count()) / 2;
+ to_move = (std::min)(to_move, right->count() - 1);
+ iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+ return false;
+ }
+ }
if (iter->node->position() > parent->start()) {
- // Try rebalancing with our left sibling. We don't perform rebalancing if
- // we deleted the last element from iter->node and the node is not
- // empty. This is a small optimization for the common pattern of deleting
- // from the back of the tree.
- node_type *left = parent->child(iter->node->position() - 1);
+ // Try rebalancing with our left sibling. We don't perform rebalancing if
+ // we deleted the last element from iter->node and the node is not
+ // empty. This is a small optimization for the common pattern of deleting
+ // from the back of the tree.
+ node_type *left = parent->child(iter->node->position() - 1);
if (left->count() > kMinNodeValues &&
(iter->node->count() == 0 || iter->position < iter->node->finish())) {
- int to_move = (left->count() - iter->node->count()) / 2;
- to_move = (std::min)(to_move, left->count() - 1);
- left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
- iter->position += to_move;
- return false;
- }
- }
- return false;
-}
-
-template <typename P>
-void btree<P>::try_shrink() {
+ int to_move = (left->count() - iter->node->count()) / 2;
+ to_move = (std::min)(to_move, left->count() - 1);
+ left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
+ iter->position += to_move;
+ return false;
+ }
+ }
+ return false;
+}
+
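The merge test above reserves one extra slot for the delimiting value pulled down from the parent, hence the "+ 1". A one-line illustration with an assumed kNodeSlots of 6:

int main() {
  const unsigned kNodeSlots = 6;  // assumed example size, not the real constant
  const unsigned left_count = 2, node_count = 3;
  // 1 (parent delimiter) + 2 + 3 == 6 <= kNodeSlots, so the merge fits.
  const bool can_merge = 1U + left_count + node_count <= kNodeSlots;
  return can_merge ? 0 : 1;
}
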
+template <typename P>
+void btree<P>::try_shrink() {
node_type *orig_root = root();
if (orig_root->count() > 0) {
- return;
- }
- // Deleted the last item on the root node, shrink the height of the tree.
+ return;
+ }
+ // Deleted the last item on the root node, shrink the height of the tree.
if (orig_root->leaf()) {
- assert(size() == 0);
+ assert(size() == 0);
mutable_root() = rightmost_ = EmptyNode();
- } else {
+ } else {
node_type *child = orig_root->start_child();
- child->make_root();
- mutable_root() = child;
- }
+ child->make_root();
+ mutable_root() = child;
+ }
node_type::clear_and_delete(orig_root, mutable_allocator());
-}
-
-template <typename P>
-template <typename IterType>
-inline IterType btree<P>::internal_last(IterType iter) {
- assert(iter.node != nullptr);
+}
+
+template <typename P>
+template <typename IterType>
+inline IterType btree<P>::internal_last(IterType iter) {
+ assert(iter.node != nullptr);
while (iter.position == iter.node->finish()) {
- iter.position = iter.node->position();
- iter.node = iter.node->parent();
- if (iter.node->leaf()) {
- iter.node = nullptr;
- break;
- }
- }
- return iter;
-}
-
-template <typename P>
-template <typename... Args>
-inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
- -> iterator {
- if (!iter.node->leaf()) {
- // We can't insert on an internal node. Instead, we'll insert after the
- // previous value which is guaranteed to be on a leaf node.
- --iter;
- ++iter.position;
- }
+ iter.position = iter.node->position();
+ iter.node = iter.node->parent();
+ if (iter.node->leaf()) {
+ iter.node = nullptr;
+ break;
+ }
+ }
+ return iter;
+}
+
+template <typename P>
+template <typename... Args>
+inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
+ -> iterator {
+ if (!iter.node->leaf()) {
+ // We can't insert on an internal node. Instead, we'll insert after the
+ // previous value which is guaranteed to be on a leaf node.
+ --iter;
+ ++iter.position;
+ }
const field_type max_count = iter.node->max_count();
allocator_type *alloc = mutable_allocator();
- if (iter.node->count() == max_count) {
- // Make room in the leaf for the new item.
+ if (iter.node->count() == max_count) {
+ // Make room in the leaf for the new item.
if (max_count < kNodeSlots) {
- // Insertion into the root where the root is smaller than the full node
- // size. Simply grow the size of the root node.
- assert(iter.node == root());
- iter.node =
+ // Insertion into the root where the root is smaller than the full node
+ // size. Simply grow the size of the root node.
+ assert(iter.node == root());
+ iter.node =
new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
// Transfer the values from the old root to the new root.
node_type *old_root = root();
@@ -2515,43 +2515,43 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
old_root->set_finish(old_root->start());
node_type::clear_and_delete(old_root, alloc);
mutable_root() = rightmost_ = new_root;
- } else {
- rebalance_or_split(&iter);
- }
- }
+ } else {
+ rebalance_or_split(&iter);
+ }
+ }
iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
- ++size_;
- return iter;
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate(const K &key) const
- -> SearchResult<iterator, is_key_compare_to::value> {
+ ++size_;
+ return iter;
+}
+
+template <typename P>
+template <typename K>
+inline auto btree<P>::internal_locate(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
iterator iter(const_cast<node_type *>(root()));
- for (;;) {
+ for (;;) {
SearchResult<int, is_key_compare_to::value> res =
iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
+ iter.position = res.value;
if (res.IsEq()) {
- return {iter, MatchKind::kEq};
- }
+ return {iter, MatchKind::kEq};
+ }
// Note: in the non-key-compare-to case, we don't need to walk all the way
// down the tree if the keys are equal, but determining equality would
// require doing an extra comparison on each node on the way down, and we
// will need to go all the way to the leaf node in the expected case.
- if (iter.node->leaf()) {
- break;
- }
- iter.node = iter.node->child(iter.position);
- }
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
// Note: in the non-key-compare-to case, the key may actually be equivalent
// here (and the MatchKind::kNe is ignored).
- return {iter, MatchKind::kNe};
-}
-
-template <typename P>
-template <typename K>
+ return {iter, MatchKind::kNe};
+}
+
+template <typename P>
+template <typename K>
auto btree<P>::internal_lower_bound(const K &key) const
-> SearchResult<iterator, is_key_compare_to::value> {
if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
@@ -2562,80 +2562,80 @@ auto btree<P>::internal_lower_bound(const K &key) const
iterator iter(const_cast<node_type *>(root()));
SearchResult<int, is_key_compare_to::value> res;
bool seen_eq = false;
- for (;;) {
+ for (;;) {
res = iter.node->lower_bound(key, key_comp());
iter.position = res.value;
- if (iter.node->leaf()) {
- break;
- }
+ if (iter.node->leaf()) {
+ break;
+ }
seen_eq = seen_eq || res.IsEq();
- iter.node = iter.node->child(iter.position);
- }
+ iter.node = iter.node->child(iter.position);
+ }
if (res.IsEq()) return {iter, MatchKind::kEq};
return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
iterator iter(const_cast<node_type *>(root()));
- for (;;) {
- iter.position = iter.node->upper_bound(key, key_comp());
- if (iter.node->leaf()) {
- break;
- }
- iter.node = iter.node->child(iter.position);
- }
- return internal_last(iter);
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::internal_find(const K &key) const -> iterator {
+ for (;;) {
+ iter.position = iter.node->upper_bound(key, key_comp());
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ return internal_last(iter);
+}
+
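At the container level these walks surface as lower_bound()/upper_bound(), which bracket a run of equal keys in a multiset:

#include <iterator>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> ms = {1, 2, 2, 2, 3};
  // lower_bound/upper_bound delimit the equal-key run that
  // internal_lower_bound/internal_upper_bound locate inside the tree.
  auto lo = ms.lower_bound(2);
  auto hi = ms.upper_bound(2);
  return std::distance(lo, hi) == 3 ? 0 : 1;
}
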
+template <typename P>
+template <typename K>
+auto btree<P>::internal_find(const K &key) const -> iterator {
SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
- if (res.HasMatch()) {
- if (res.IsEq()) {
- return res.value;
- }
- } else {
- const iterator iter = internal_last(res.value);
- if (iter.node != nullptr && !compare_keys(key, iter.key())) {
- return iter;
- }
- }
- return {nullptr, 0};
-}
-
-template <typename P>
+ if (res.HasMatch()) {
+ if (res.IsEq()) {
+ return res.value;
+ }
+ } else {
+ const iterator iter = internal_last(res.value);
+ if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+ return iter;
+ }
+ }
+ return {nullptr, 0};
+}
+
+template <typename P>
int btree<P>::internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const {
- assert(node->count() > 0);
- assert(node->count() <= node->max_count());
- if (lo) {
+ assert(node->count() > 0);
+ assert(node->count() <= node->max_count());
+ if (lo) {
assert(!compare_keys(node->key(node->start()), *lo));
- }
- if (hi) {
+ }
+ if (hi) {
assert(!compare_keys(*hi, node->key(node->finish() - 1)));
- }
+ }
for (int i = node->start() + 1; i < node->finish(); ++i) {
- assert(!compare_keys(node->key(i), node->key(i - 1)));
- }
- int count = node->count();
- if (!node->leaf()) {
+ assert(!compare_keys(node->key(i), node->key(i - 1)));
+ }
+ int count = node->count();
+ if (!node->leaf()) {
for (int i = node->start(); i <= node->finish(); ++i) {
- assert(node->child(i) != nullptr);
- assert(node->child(i)->parent() == node);
- assert(node->child(i)->position() == i);
+ assert(node->child(i) != nullptr);
+ assert(node->child(i)->parent() == node);
+ assert(node->child(i)->position() == i);
count += internal_verify(node->child(i),
i == node->start() ? lo : &node->key(i - 1),
i == node->finish() ? hi : &node->key(i));
- }
- }
- return count;
-}
-
-} // namespace container_internal
+ }
+ }
+ return count;
+}
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
index a99668c713..ddc44ef122 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
@@ -1,75 +1,75 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
-#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
-
-#include <algorithm>
-#include <initializer_list>
-#include <iterator>
-#include <utility>
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
#include "absl/base/attributes.h"
-#include "absl/base/internal/throw_delegate.h"
-#include "absl/container/internal/btree.h" // IWYU pragma: export
-#include "absl/container/internal/common.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/btree.h" // IWYU pragma: export
+#include "absl/container/internal/common.h"
#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
-
-namespace absl {
+#include "absl/meta/type_traits.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// A common base class for btree_set, btree_map, btree_multiset, and
-// btree_multimap.
-template <typename Tree>
-class btree_container {
- using params_type = typename Tree::params_type;
-
- protected:
- // Alias used for heterogeneous lookup functions.
- // `key_arg<K>` evaluates to `K` when the functors are transparent and to
- // `key_type` otherwise. It permits template argument deduction on `K` for the
- // transparent case.
- template <class K>
- using key_arg =
- typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
- template type<K, typename Tree::key_type>;
-
- public:
- using key_type = typename Tree::key_type;
- using value_type = typename Tree::value_type;
- using size_type = typename Tree::size_type;
- using difference_type = typename Tree::difference_type;
+namespace container_internal {
+
+// A common base class for btree_set, btree_map, btree_multiset, and
+// btree_multimap.
+template <typename Tree>
+class btree_container {
+ using params_type = typename Tree::params_type;
+
+ protected:
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg =
+ typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
+ template type<K, typename Tree::key_type>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using difference_type = typename Tree::difference_type;
using key_compare = typename Tree::original_key_compare;
- using value_compare = typename Tree::value_compare;
- using allocator_type = typename Tree::allocator_type;
- using reference = typename Tree::reference;
- using const_reference = typename Tree::const_reference;
- using pointer = typename Tree::pointer;
- using const_pointer = typename Tree::const_pointer;
- using iterator = typename Tree::iterator;
- using const_iterator = typename Tree::const_iterator;
- using reverse_iterator = typename Tree::reverse_iterator;
- using const_reverse_iterator = typename Tree::const_reverse_iterator;
- using node_type = typename Tree::node_handle_type;
-
- // Constructors/assignments.
- btree_container() : tree_(key_compare(), allocator_type()) {}
- explicit btree_container(const key_compare &comp,
- const allocator_type &alloc = allocator_type())
- : tree_(comp, alloc) {}
+ using value_compare = typename Tree::value_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using reference = typename Tree::reference;
+ using const_reference = typename Tree::const_reference;
+ using pointer = typename Tree::pointer;
+ using const_pointer = typename Tree::const_pointer;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using reverse_iterator = typename Tree::reverse_iterator;
+ using const_reverse_iterator = typename Tree::const_reverse_iterator;
+ using node_type = typename Tree::node_handle_type;
+
+ // Constructors/assignments.
+ btree_container() : tree_(key_compare(), allocator_type()) {}
+ explicit btree_container(const key_compare &comp,
+ const allocator_type &alloc = allocator_type())
+ : tree_(comp, alloc) {}
explicit btree_container(const allocator_type &alloc)
: tree_(key_compare(), alloc) {}
@@ -87,339 +87,339 @@ class btree_container {
btree_container &operator=(const btree_container &other) = default;
btree_container &operator=(btree_container &&other) noexcept(
- std::is_nothrow_move_assignable<Tree>::value) = default;
-
- // Iterator routines.
- iterator begin() { return tree_.begin(); }
- const_iterator begin() const { return tree_.begin(); }
- const_iterator cbegin() const { return tree_.begin(); }
- iterator end() { return tree_.end(); }
- const_iterator end() const { return tree_.end(); }
- const_iterator cend() const { return tree_.end(); }
- reverse_iterator rbegin() { return tree_.rbegin(); }
- const_reverse_iterator rbegin() const { return tree_.rbegin(); }
- const_reverse_iterator crbegin() const { return tree_.rbegin(); }
- reverse_iterator rend() { return tree_.rend(); }
- const_reverse_iterator rend() const { return tree_.rend(); }
- const_reverse_iterator crend() const { return tree_.rend(); }
-
- // Lookup routines.
- template <typename K = key_type>
+ std::is_nothrow_move_assignable<Tree>::value) = default;
+
+ // Iterator routines.
+ iterator begin() { return tree_.begin(); }
+ const_iterator begin() const { return tree_.begin(); }
+ const_iterator cbegin() const { return tree_.begin(); }
+ iterator end() { return tree_.end(); }
+ const_iterator end() const { return tree_.end(); }
+ const_iterator cend() const { return tree_.end(); }
+ reverse_iterator rbegin() { return tree_.rbegin(); }
+ const_reverse_iterator rbegin() const { return tree_.rbegin(); }
+ const_reverse_iterator crbegin() const { return tree_.rbegin(); }
+ reverse_iterator rend() { return tree_.rend(); }
+ const_reverse_iterator rend() const { return tree_.rend(); }
+ const_reverse_iterator crend() const { return tree_.rend(); }
+
+ // Lookup routines.
+ template <typename K = key_type>
size_type count(const key_arg<K> &key) const {
auto equal_range = this->equal_range(key);
return std::distance(equal_range.first, equal_range.second);
}
template <typename K = key_type>
- iterator find(const key_arg<K> &key) {
- return tree_.find(key);
- }
- template <typename K = key_type>
- const_iterator find(const key_arg<K> &key) const {
- return tree_.find(key);
- }
- template <typename K = key_type>
- bool contains(const key_arg<K> &key) const {
- return find(key) != end();
- }
- template <typename K = key_type>
- iterator lower_bound(const key_arg<K> &key) {
- return tree_.lower_bound(key);
- }
- template <typename K = key_type>
- const_iterator lower_bound(const key_arg<K> &key) const {
- return tree_.lower_bound(key);
- }
- template <typename K = key_type>
- iterator upper_bound(const key_arg<K> &key) {
- return tree_.upper_bound(key);
- }
- template <typename K = key_type>
- const_iterator upper_bound(const key_arg<K> &key) const {
- return tree_.upper_bound(key);
- }
- template <typename K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
- return tree_.equal_range(key);
- }
- template <typename K = key_type>
- std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K> &key) const {
- return tree_.equal_range(key);
- }
-
- // Deletion routines. Note that there is also a deletion routine that is
- // specific to btree_set_container/btree_multiset_container.
-
- // Erase the specified iterator from the btree. The iterator must be valid
- // (i.e. not equal to end()). Return an iterator pointing to the node after
- // the one that was erased (or end() if none exists).
- iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
- iterator erase(iterator iter) { return tree_.erase(iter); }
- iterator erase(const_iterator first, const_iterator last) {
+ iterator find(const key_arg<K> &key) {
+ return tree_.find(key);
+ }
+ template <typename K = key_type>
+ const_iterator find(const key_arg<K> &key) const {
+ return tree_.find(key);
+ }
+ template <typename K = key_type>
+ bool contains(const key_arg<K> &key) const {
+ return find(key) != end();
+ }
+ template <typename K = key_type>
+ iterator lower_bound(const key_arg<K> &key) {
+ return tree_.lower_bound(key);
+ }
+ template <typename K = key_type>
+ const_iterator lower_bound(const key_arg<K> &key) const {
+ return tree_.lower_bound(key);
+ }
+ template <typename K = key_type>
+ iterator upper_bound(const key_arg<K> &key) {
+ return tree_.upper_bound(key);
+ }
+ template <typename K = key_type>
+ const_iterator upper_bound(const key_arg<K> &key) const {
+ return tree_.upper_bound(key);
+ }
+ template <typename K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
+ return tree_.equal_range(key);
+ }
+ template <typename K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K> &key) const {
+ return tree_.equal_range(key);
+ }
+
+ // Deletion routines. Note that there is also a deletion routine that is
+ // specific to btree_set_container/btree_multiset_container.
+
+ // Erase the specified iterator from the btree. The iterator must be valid
+ // (i.e. not equal to end()). Return an iterator pointing to the node after
+ // the one that was erased (or end() if none exists).
+ iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
+ iterator erase(iterator iter) { return tree_.erase(iter); }
+ iterator erase(const_iterator first, const_iterator last) {
return tree_.erase_range(iterator(first), iterator(last)).second;
- }
+ }
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
auto equal_range = this->equal_range(key);
return tree_.erase_range(equal_range.first, equal_range.second).first;
}
-
- // Extract routines.
- node_type extract(iterator position) {
- // Use Move instead of Transfer, because the rebalancing code expects to
- // have a valid object to scribble metadata bits on top of.
- auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
- erase(position);
- return node;
- }
- node_type extract(const_iterator position) {
- return extract(iterator(position));
- }
-
- // Utility routines.
+
+ // Extract routines.
+ node_type extract(iterator position) {
+ // Use Move instead of Transfer, because the rebalancing code expects to
+ // have a valid object to scribble metadata bits on top of.
+ auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+ erase(position);
+ return node;
+ }
+ node_type extract(const_iterator position) {
+ return extract(iterator(position));
+ }
+
+ // Utility routines.
ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
void swap(btree_container &other) { tree_.swap(other.tree_); }
- void verify() const { tree_.verify(); }
-
- // Size routines.
- size_type size() const { return tree_.size(); }
- size_type max_size() const { return tree_.max_size(); }
- bool empty() const { return tree_.empty(); }
-
- friend bool operator==(const btree_container &x, const btree_container &y) {
- if (x.size() != y.size()) return false;
- return std::equal(x.begin(), x.end(), y.begin());
- }
-
- friend bool operator!=(const btree_container &x, const btree_container &y) {
- return !(x == y);
- }
-
- friend bool operator<(const btree_container &x, const btree_container &y) {
- return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end());
- }
-
- friend bool operator>(const btree_container &x, const btree_container &y) {
- return y < x;
- }
-
- friend bool operator<=(const btree_container &x, const btree_container &y) {
- return !(y < x);
- }
-
- friend bool operator>=(const btree_container &x, const btree_container &y) {
- return !(x < y);
- }
-
- // The allocator used by the btree.
- allocator_type get_allocator() const { return tree_.get_allocator(); }
-
- // The key comparator used by the btree.
+ void verify() const { tree_.verify(); }
+
+ // Size routines.
+ size_type size() const { return tree_.size(); }
+ size_type max_size() const { return tree_.max_size(); }
+ bool empty() const { return tree_.empty(); }
+
+ friend bool operator==(const btree_container &x, const btree_container &y) {
+ if (x.size() != y.size()) return false;
+ return std::equal(x.begin(), x.end(), y.begin());
+ }
+
+ friend bool operator!=(const btree_container &x, const btree_container &y) {
+ return !(x == y);
+ }
+
+ friend bool operator<(const btree_container &x, const btree_container &y) {
+ return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end());
+ }
+
+ friend bool operator>(const btree_container &x, const btree_container &y) {
+ return y < x;
+ }
+
+ friend bool operator<=(const btree_container &x, const btree_container &y) {
+ return !(y < x);
+ }
+
+ friend bool operator>=(const btree_container &x, const btree_container &y) {
+ return !(x < y);
+ }
+
+ // The allocator used by the btree.
+ allocator_type get_allocator() const { return tree_.get_allocator(); }
+
+ // The key comparator used by the btree.
key_compare key_comp() const { return key_compare(tree_.key_comp()); }
- value_compare value_comp() const { return tree_.value_comp(); }
-
- // Support absl::Hash.
- template <typename State>
- friend State AbslHashValue(State h, const btree_container &b) {
- for (const auto &v : b) {
- h = State::combine(std::move(h), v);
- }
- return State::combine(std::move(h), b.size());
- }
-
- protected:
- Tree tree_;
-};
-
-// A common base class for btree_set and btree_map.
-template <typename Tree>
-class btree_set_container : public btree_container<Tree> {
- using super_type = btree_container<Tree>;
- using params_type = typename Tree::params_type;
- using init_type = typename params_type::init_type;
- using is_key_compare_to = typename params_type::is_key_compare_to;
- friend class BtreeNodePeer;
-
- protected:
- template <class K>
- using key_arg = typename super_type::template key_arg<K>;
-
- public:
- using key_type = typename Tree::key_type;
- using value_type = typename Tree::value_type;
- using size_type = typename Tree::size_type;
+ value_compare value_comp() const { return tree_.value_comp(); }
+
+ // Support absl::Hash.
+ template <typename State>
+ friend State AbslHashValue(State h, const btree_container &b) {
+ for (const auto &v : b) {
+ h = State::combine(std::move(h), v);
+ }
+ return State::combine(std::move(h), b.size());
+ }
+
+ protected:
+ Tree tree_;
+};
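A sketch of the heterogeneous lookup that key_arg enables: with a transparent comparator such as std::less<>, the lookup routines accept any type comparable with key_type, e.g. an absl::string_view against std::string keys, avoiding a temporary std::string:

#include <functional>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"

int main() {
  // std::less<> is transparent, so key_arg<K> deduces K and lookups take the
  // argument as-is instead of converting it to key_type first.
  absl::btree_map<std::string, int, std::less<>> m = {{"key", 1}};
  absl::string_view needle = "key";
  return (m.contains(needle) && m.count(needle) == 1) ? 0 : 1;
}
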
+
+// A common base class for btree_set and btree_map.
+template <typename Tree>
+class btree_set_container : public btree_container<Tree> {
+ using super_type = btree_container<Tree>;
+ using params_type = typename Tree::params_type;
+ using init_type = typename params_type::init_type;
+ using is_key_compare_to = typename params_type::is_key_compare_to;
+ friend class BtreeNodePeer;
+
+ protected:
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
using key_compare = typename Tree::original_key_compare;
- using allocator_type = typename Tree::allocator_type;
- using iterator = typename Tree::iterator;
- using const_iterator = typename Tree::const_iterator;
- using node_type = typename super_type::node_type;
- using insert_return_type = InsertReturnType<iterator, node_type>;
-
- // Inherit constructors.
- using super_type::super_type;
- btree_set_container() {}
-
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using node_type = typename super_type::node_type;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_set_container() {}
+
// Range constructors.
- template <class InputIterator>
- btree_set_container(InputIterator b, InputIterator e,
- const key_compare &comp = key_compare(),
- const allocator_type &alloc = allocator_type())
- : super_type(comp, alloc) {
- insert(b, e);
- }
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : super_type(comp, alloc) {
+ insert(b, e);
+ }
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const allocator_type &alloc)
: btree_set_container(b, e, key_compare(), alloc) {}
-
+
// Initializer list constructors.
- btree_set_container(std::initializer_list<init_type> init,
- const key_compare &comp = key_compare(),
- const allocator_type &alloc = allocator_type())
- : btree_set_container(init.begin(), init.end(), comp, alloc) {}
+ btree_set_container(std::initializer_list<init_type> init,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : btree_set_container(init.begin(), init.end(), comp, alloc) {}
btree_set_container(std::initializer_list<init_type> init,
const allocator_type &alloc)
: btree_set_container(init.begin(), init.end(), alloc) {}
-
- // Insertion routines.
+
+ // Insertion routines.
std::pair<iterator, bool> insert(const value_type &v) {
return this->tree_.insert_unique(params_type::key(v), v);
- }
+ }
std::pair<iterator, bool> insert(value_type &&v) {
return this->tree_.insert_unique(params_type::key(v), std::move(v));
- }
- template <typename... Args>
- std::pair<iterator, bool> emplace(Args &&... args) {
- init_type v(std::forward<Args>(args)...);
- return this->tree_.insert_unique(params_type::key(v), std::move(v));
- }
+ }
+ template <typename... Args>
+ std::pair<iterator, bool> emplace(Args &&... args) {
+ init_type v(std::forward<Args>(args)...);
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
+ }
iterator insert(const_iterator hint, const value_type &v) {
- return this->tree_
+ return this->tree_
.insert_hint_unique(iterator(hint), params_type::key(v), v)
- .first;
- }
+ .first;
+ }
iterator insert(const_iterator hint, value_type &&v) {
- return this->tree_
+ return this->tree_
.insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
- .first;
- }
- template <typename... Args>
+ .first;
+ }
+ template <typename... Args>
iterator emplace_hint(const_iterator hint, Args &&... args) {
- init_type v(std::forward<Args>(args)...);
- return this->tree_
+ init_type v(std::forward<Args>(args)...);
+ return this->tree_
.insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
- .first;
- }
- template <typename InputIterator>
- void insert(InputIterator b, InputIterator e) {
+ .first;
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
this->tree_.insert_iterator_unique(b, e, 0);
- }
- void insert(std::initializer_list<init_type> init) {
+ }
+ void insert(std::initializer_list<init_type> init) {
this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
- }
- insert_return_type insert(node_type &&node) {
- if (!node) return {this->end(), false, node_type()};
- std::pair<iterator, bool> res =
+ }
+ insert_return_type insert(node_type &&node) {
+ if (!node) return {this->end(), false, node_type()};
+ std::pair<iterator, bool> res =
this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)),
CommonAccess::GetSlot(node));
- if (res.second) {
+ if (res.second) {
CommonAccess::Destroy(&node);
- return {res.first, true, node_type()};
- } else {
- return {res.first, false, std::move(node)};
- }
- }
- iterator insert(const_iterator hint, node_type &&node) {
- if (!node) return this->end();
- std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
- iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+ iterator insert(const_iterator hint, node_type &&node) {
+ if (!node) return this->end();
+ std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
+ iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
CommonAccess::GetSlot(node));
if (res.second) CommonAccess::Destroy(&node);
- return res.first;
- }
-
- // Node extraction routines.
- template <typename K = key_type>
- node_type extract(const key_arg<K> &key) {
+ return res.first;
+ }
+
+ // Node extraction routines.
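+ // A minimal usage sketch (assuming an absl::btree_set<int>; illustrative
+ // only):
+ //
+ //   absl::btree_set<int> s = {1, 2, 3};
+ //   auto node = s.extract(2);   // s == {1, 3}; `node` owns the element.
+ //   s.insert(std::move(node));  // s == {1, 2, 3} again.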
+ template <typename K = key_type>
+ node_type extract(const key_arg<K> &key) {
const std::pair<iterator, bool> lower_and_equal =
this->tree_.lower_bound_equal(key);
return lower_and_equal.second ? extract(lower_and_equal.first)
: node_type();
- }
- using super_type::extract;
-
- // Merge routines.
- // Moves elements from `src` into `this`. If the element already exists in
- // `this`, it is left unmodified in `src`.
- template <
- typename T,
- typename absl::enable_if_t<
- absl::conjunction<
- std::is_same<value_type, typename T::value_type>,
- std::is_same<allocator_type, typename T::allocator_type>,
- std::is_same<typename params_type::is_map_container,
- typename T::params_type::is_map_container>>::value,
- int> = 0>
- void merge(btree_container<T> &src) { // NOLINT
- for (auto src_it = src.begin(); src_it != src.end();) {
+ }
+ using super_type::extract;
+
+ // Merge routines.
+ // Moves elements from `src` into `this`. If the element already exists in
+ // `this`, it is left unmodified in `src`.
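+ // A minimal usage sketch (assuming absl::btree_set<int> on both sides;
+ // illustrative only):
+ //
+ //   absl::btree_set<int> dst = {1, 2};
+ //   absl::btree_set<int> src = {2, 3};
+ //   dst.merge(src);  // dst == {1, 2, 3}; src == {2} (the duplicate stays).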
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &src) { // NOLINT
+ for (auto src_it = src.begin(); src_it != src.end();) {
if (insert(std::move(params_type::element(src_it.slot()))).second) {
- src_it = src.erase(src_it);
- } else {
- ++src_it;
- }
- }
- }
-
- template <
- typename T,
- typename absl::enable_if_t<
- absl::conjunction<
- std::is_same<value_type, typename T::value_type>,
- std::is_same<allocator_type, typename T::allocator_type>,
- std::is_same<typename params_type::is_map_container,
- typename T::params_type::is_map_container>>::value,
- int> = 0>
- void merge(btree_container<T> &&src) {
- merge(src);
- }
-};
-
-// Base class for btree_map.
-template <typename Tree>
-class btree_map_container : public btree_set_container<Tree> {
- using super_type = btree_set_container<Tree>;
- using params_type = typename Tree::params_type;
+ src_it = src.erase(src_it);
+ } else {
+ ++src_it;
+ }
+ }
+ }
+
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &&src) {
+ merge(src);
+ }
+};
+
+// Base class for btree_map.
+template <typename Tree>
+class btree_map_container : public btree_set_container<Tree> {
+ using super_type = btree_set_container<Tree>;
+ using params_type = typename Tree::params_type;
friend class BtreeNodePeer;
-
+
private:
- template <class K>
- using key_arg = typename super_type::template key_arg<K>;
-
- public:
- using key_type = typename Tree::key_type;
- using mapped_type = typename params_type::mapped_type;
- using value_type = typename Tree::value_type;
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using mapped_type = typename params_type::mapped_type;
+ using value_type = typename Tree::value_type;
using key_compare = typename Tree::original_key_compare;
- using allocator_type = typename Tree::allocator_type;
- using iterator = typename Tree::iterator;
- using const_iterator = typename Tree::const_iterator;
-
- // Inherit constructors.
- using super_type::super_type;
- btree_map_container() {}
-
- // Insertion routines.
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_map_container() {}
+
+ // Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
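// A minimal sketch of the bitfield case (the `Flags` type here is
// hypothetical, not part of this header):
//
//   struct Flags { unsigned priority : 3; };
//   Flags f{5};
//   absl::btree_map<int, unsigned> m;
//   m.insert_or_assign(1, f.priority);  // Binds via the `const M&` overload;
//                                       // a deduced `M&&` cannot bind to a
//                                       // bitfield.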
template <typename K = key_type, class M>
std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
const M &obj) {
return insert_or_assign_impl(k, obj);
- }
+ }
template <typename K = key_type, class M, K * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
return insert_or_assign_impl(std::forward<K>(k), obj);
- }
+ }
template <typename K = key_type, class M, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
return insert_or_assign_impl(k, std::forward<M>(obj));
@@ -461,38 +461,38 @@ class btree_map_container : public btree_set_container<Tree> {
}
template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, const key_arg<K> &k,
- Args &&... args) {
+ Args &&... args) {
return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
- }
+ }
template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
return try_emplace_hint_impl(hint, std::forward<K>(k),
std::forward<Args>(args)...);
- }
+ }
template <typename K = key_type>
mapped_type &operator[](const key_arg<K> &k) {
- return try_emplace(k).first->second;
- }
+ return try_emplace(k).first->second;
+ }
template <typename K = key_type>
mapped_type &operator[](key_arg<K> &&k) {
return try_emplace(std::forward<K>(k)).first->second;
- }
-
- template <typename K = key_type>
- mapped_type &at(const key_arg<K> &key) {
- auto it = this->find(key);
- if (it == this->end())
- base_internal::ThrowStdOutOfRange("absl::btree_map::at");
- return it->second;
- }
- template <typename K = key_type>
- const mapped_type &at(const key_arg<K> &key) const {
- auto it = this->find(key);
- if (it == this->end())
- base_internal::ThrowStdOutOfRange("absl::btree_map::at");
- return it->second;
- }
+ }
+
+ template <typename K = key_type>
+ mapped_type &at(const key_arg<K> &key) {
+ auto it = this->find(key);
+ if (it == this->end())
+ base_internal::ThrowStdOutOfRange("absl::btree_map::at");
+ return it->second;
+ }
+ template <typename K = key_type>
+ const mapped_type &at(const key_arg<K> &key) const {
+ auto it = this->find(key);
+ if (it == this->end())
+ base_internal::ThrowStdOutOfRange("absl::btree_map::at");
+ return it->second;
+ }
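+ // A minimal usage sketch (assuming absl::btree_map<std::string, int>;
+ // illustrative only):
+ //
+ //   absl::btree_map<std::string, int> m;
+ //   m["a"] = 1;             // Default-constructs the mapped value, then assigns.
+ //   m.try_emplace("b", 2);  // Constructs the mapped value in place.
+ //   int x = m.at("a");      // Throws std::out_of_range for a missing key.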
private:
// Note: when we call `std::forward<M>(obj)` twice, it's safe because
@@ -527,157 +527,157 @@ class btree_map_container : public btree_set_container<Tree> {
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
}
-};
-
-// A common base class for btree_multiset and btree_multimap.
-template <typename Tree>
-class btree_multiset_container : public btree_container<Tree> {
- using super_type = btree_container<Tree>;
- using params_type = typename Tree::params_type;
- using init_type = typename params_type::init_type;
- using is_key_compare_to = typename params_type::is_key_compare_to;
-
- template <class K>
- using key_arg = typename super_type::template key_arg<K>;
-
- public:
- using key_type = typename Tree::key_type;
- using value_type = typename Tree::value_type;
- using size_type = typename Tree::size_type;
+};
+
+// A common base class for btree_multiset and btree_multimap.
+template <typename Tree>
+class btree_multiset_container : public btree_container<Tree> {
+ using super_type = btree_container<Tree>;
+ using params_type = typename Tree::params_type;
+ using init_type = typename params_type::init_type;
+ using is_key_compare_to = typename params_type::is_key_compare_to;
+
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
using key_compare = typename Tree::original_key_compare;
- using allocator_type = typename Tree::allocator_type;
- using iterator = typename Tree::iterator;
- using const_iterator = typename Tree::const_iterator;
- using node_type = typename super_type::node_type;
-
- // Inherit constructors.
- using super_type::super_type;
- btree_multiset_container() {}
-
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using node_type = typename super_type::node_type;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_multiset_container() {}
+
// Range constructors.
- template <class InputIterator>
- btree_multiset_container(InputIterator b, InputIterator e,
- const key_compare &comp = key_compare(),
- const allocator_type &alloc = allocator_type())
- : super_type(comp, alloc) {
- insert(b, e);
- }
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : super_type(comp, alloc) {
+ insert(b, e);
+ }
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const allocator_type &alloc)
: btree_multiset_container(b, e, key_compare(), alloc) {}
-
+
// Initializer list constructors.
- btree_multiset_container(std::initializer_list<init_type> init,
- const key_compare &comp = key_compare(),
- const allocator_type &alloc = allocator_type())
- : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
btree_multiset_container(std::initializer_list<init_type> init,
const allocator_type &alloc)
: btree_multiset_container(init.begin(), init.end(), alloc) {}
-
- // Insertion routines.
+
+ // Insertion routines.
iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
iterator insert(value_type &&v) {
return this->tree_.insert_multi(std::move(v));
- }
+ }
iterator insert(const_iterator hint, const value_type &v) {
return this->tree_.insert_hint_multi(iterator(hint), v);
- }
+ }
iterator insert(const_iterator hint, value_type &&v) {
return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
- }
- template <typename InputIterator>
- void insert(InputIterator b, InputIterator e) {
- this->tree_.insert_iterator_multi(b, e);
- }
- void insert(std::initializer_list<init_type> init) {
- this->tree_.insert_iterator_multi(init.begin(), init.end());
- }
- template <typename... Args>
- iterator emplace(Args &&... args) {
- return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
- }
- template <typename... Args>
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ this->tree_.insert_iterator_multi(b, e);
+ }
+ void insert(std::initializer_list<init_type> init) {
+ this->tree_.insert_iterator_multi(init.begin(), init.end());
+ }
+ template <typename... Args>
+ iterator emplace(Args &&... args) {
+ return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+ }
+ template <typename... Args>
iterator emplace_hint(const_iterator hint, Args &&... args) {
- return this->tree_.insert_hint_multi(
+ return this->tree_.insert_hint_multi(
iterator(hint), init_type(std::forward<Args>(args)...));
- }
+ }
iterator insert(node_type &&node) {
- if (!node) return this->end();
- iterator res =
+ if (!node) return this->end();
+ iterator res =
this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)),
CommonAccess::GetSlot(node));
CommonAccess::Destroy(&node);
- return res;
- }
- iterator insert(const_iterator hint, node_type &&node) {
+ return res;
+ }
+ iterator insert(const_iterator hint, node_type &&node) {
if (!node) return this->end();
iterator res = this->tree_.insert_hint_multi(
iterator(hint),
std::move(params_type::element(CommonAccess::GetSlot(node))));
CommonAccess::Destroy(&node);
return res;
- }
-
- // Node extraction routines.
- template <typename K = key_type>
- node_type extract(const key_arg<K> &key) {
+ }
+
+ // Node extraction routines.
+ template <typename K = key_type>
+ node_type extract(const key_arg<K> &key) {
const std::pair<iterator, bool> lower_and_equal =
this->tree_.lower_bound_equal(key);
return lower_and_equal.second ? extract(lower_and_equal.first)
: node_type();
- }
- using super_type::extract;
-
- // Merge routines.
- // Moves all elements from `src` into `this`.
- template <
- typename T,
- typename absl::enable_if_t<
- absl::conjunction<
- std::is_same<value_type, typename T::value_type>,
- std::is_same<allocator_type, typename T::allocator_type>,
- std::is_same<typename params_type::is_map_container,
- typename T::params_type::is_map_container>>::value,
- int> = 0>
- void merge(btree_container<T> &src) { // NOLINT
+ }
+ using super_type::extract;
+
+ // Merge routines.
+ // Moves all elements from `src` into `this`.
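+ // A minimal usage sketch (assuming absl::btree_multiset<int> on both sides;
+ // illustrative only):
+ //
+ //   absl::btree_multiset<int> dst = {1, 2};
+ //   absl::btree_multiset<int> src = {2, 3};
+ //   dst.merge(src);  // dst == {1, 2, 2, 3}; src is left empty.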
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &src) { // NOLINT
for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
insert(std::move(params_type::element(src_it.slot())));
}
- src.clear();
- }
-
- template <
- typename T,
- typename absl::enable_if_t<
- absl::conjunction<
- std::is_same<value_type, typename T::value_type>,
- std::is_same<allocator_type, typename T::allocator_type>,
- std::is_same<typename params_type::is_map_container,
- typename T::params_type::is_map_container>>::value,
- int> = 0>
- void merge(btree_container<T> &&src) {
- merge(src);
- }
-};
-
-// A base class for btree_multimap.
-template <typename Tree>
-class btree_multimap_container : public btree_multiset_container<Tree> {
- using super_type = btree_multiset_container<Tree>;
- using params_type = typename Tree::params_type;
-
- public:
- using mapped_type = typename params_type::mapped_type;
-
- // Inherit constructors.
- using super_type::super_type;
- btree_multimap_container() {}
-};
-
-} // namespace container_internal
+ src.clear();
+ }
+
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &&src) {
+ merge(src);
+ }
+};
+
+// A base class for btree_multimap.
+template <typename Tree>
+class btree_multimap_container : public btree_multiset_container<Tree> {
+ using super_type = btree_multiset_container<Tree>;
+ using params_type = typename Tree::params_type;
+
+ public:
+ using mapped_type = typename params_type::mapped_type;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_multimap_container() {}
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/common.h b/contrib/restricted/abseil-cpp/absl/container/internal/common.h
index 030e9d4ab0..848a90f2af 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/common.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/common.h
@@ -1,206 +1,206 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
-#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
-
-#include <cassert>
-#include <type_traits>
-
-#include "absl/meta/type_traits.h"
-#include "absl/types/optional.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "absl/meta/type_traits.h"
+#include "absl/types/optional.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class, class = void>
-struct IsTransparent : std::false_type {};
-template <class T>
-struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
- : std::true_type {};
-
-template <bool is_transparent>
-struct KeyArg {
- // Transparent. Forward `K`.
- template <typename K, typename key_type>
- using type = K;
-};
-
-template <>
-struct KeyArg<false> {
- // Not transparent. Always use `key_type`.
- template <typename K, typename key_type>
- using type = key_type;
-};
-
-// The node_handle concept from C++17.
-// We specialize node_handle for sets and maps. node_handle_base holds the
-// common API of both.
-template <typename PolicyTraits, typename Alloc>
-class node_handle_base {
- protected:
- using slot_type = typename PolicyTraits::slot_type;
-
- public:
- using allocator_type = Alloc;
-
+namespace container_internal {
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
+ : std::true_type {};
+
+template <bool is_transparent>
+struct KeyArg {
+ // Transparent. Forward `K`.
+ template <typename K, typename key_type>
+ using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+ // Not transparent. Always use `key_type`.
+ template <typename K, typename key_type>
+ using type = key_type;
+};
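+
+// For example (illustrative only), with a transparent comparator such as
+// std::less<>:
+//
+//   KeyArg<true>::type<const char*, std::string>   // -> const char*
+//   KeyArg<false>::type<const char*, std::string>  // -> std::string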
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
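+//
+// A minimal sketch of the workflow (assuming an absl::flat_hash_map<int,
+// std::string>, which is built on these handles; illustrative only):
+//
+//   absl::flat_hash_map<int, std::string> m = {{1, "one"}};
+//   auto node = m.extract(1);    // m is now empty; `node` owns the element.
+//   node.mapped() = "uno";       // Access the element through the handle.
+//   m.insert(std::move(node));   // m == {{1, "uno"}}.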
+template <typename PolicyTraits, typename Alloc>
+class node_handle_base {
+ protected:
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using allocator_type = Alloc;
+
constexpr node_handle_base() = default;
- node_handle_base(node_handle_base&& other) noexcept {
- *this = std::move(other);
- }
- ~node_handle_base() { destroy(); }
- node_handle_base& operator=(node_handle_base&& other) noexcept {
- destroy();
- if (!other.empty()) {
- alloc_ = other.alloc_;
- PolicyTraits::transfer(alloc(), slot(), other.slot());
- other.reset();
- }
- return *this;
- }
-
- bool empty() const noexcept { return !alloc_; }
- explicit operator bool() const noexcept { return !empty(); }
- allocator_type get_allocator() const { return *alloc_; }
-
- protected:
- friend struct CommonAccess;
-
- struct transfer_tag_t {};
- node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
- : alloc_(a) {
- PolicyTraits::transfer(alloc(), slot(), s);
- }
-
- struct move_tag_t {};
- node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
- : alloc_(a) {
- PolicyTraits::construct(alloc(), slot(), s);
- }
-
- void destroy() {
- if (!empty()) {
- PolicyTraits::destroy(alloc(), slot());
- reset();
- }
- }
-
- void reset() {
- assert(alloc_.has_value());
- alloc_ = absl::nullopt;
- }
-
- slot_type* slot() const {
- assert(!empty());
- return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
- }
- allocator_type* alloc() { return std::addressof(*alloc_); }
-
- private:
+ node_handle_base(node_handle_base&& other) noexcept {
+ *this = std::move(other);
+ }
+ ~node_handle_base() { destroy(); }
+ node_handle_base& operator=(node_handle_base&& other) noexcept {
+ destroy();
+ if (!other.empty()) {
+ alloc_ = other.alloc_;
+ PolicyTraits::transfer(alloc(), slot(), other.slot());
+ other.reset();
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept { return !alloc_; }
+ explicit operator bool() const noexcept { return !empty(); }
+ allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+ friend struct CommonAccess;
+
+ struct transfer_tag_t {};
+ node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::transfer(alloc(), slot(), s);
+ }
+
+ struct move_tag_t {};
+ node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::construct(alloc(), slot(), s);
+ }
+
+ void destroy() {
+ if (!empty()) {
+ PolicyTraits::destroy(alloc(), slot());
+ reset();
+ }
+ }
+
+ void reset() {
+ assert(alloc_.has_value());
+ alloc_ = absl::nullopt;
+ }
+
+ slot_type* slot() const {
+ assert(!empty());
+ return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+ }
+ allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
absl::optional<allocator_type> alloc_ = {};
alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {};
-};
-
-// For sets.
-template <typename Policy, typename PolicyTraits, typename Alloc,
- typename = void>
-class node_handle : public node_handle_base<PolicyTraits, Alloc> {
- using Base = node_handle_base<PolicyTraits, Alloc>;
-
- public:
- using value_type = typename PolicyTraits::value_type;
-
- constexpr node_handle() {}
-
- value_type& value() const { return PolicyTraits::element(this->slot()); }
-
- private:
- friend struct CommonAccess;
-
- using Base::Base;
-};
-
-// For maps.
-template <typename Policy, typename PolicyTraits, typename Alloc>
-class node_handle<Policy, PolicyTraits, Alloc,
- absl::void_t<typename Policy::mapped_type>>
- : public node_handle_base<PolicyTraits, Alloc> {
- using Base = node_handle_base<PolicyTraits, Alloc>;
+};
+
+// For sets.
+template <typename Policy, typename PolicyTraits, typename Alloc,
+ typename = void>
+class node_handle : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = node_handle_base<PolicyTraits, Alloc>;
+
+ public:
+ using value_type = typename PolicyTraits::value_type;
+
+ constexpr node_handle() {}
+
+ value_type& value() const { return PolicyTraits::element(this->slot()); }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// For maps.
+template <typename Policy, typename PolicyTraits, typename Alloc>
+class node_handle<Policy, PolicyTraits, Alloc,
+ absl::void_t<typename Policy::mapped_type>>
+ : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = node_handle_base<PolicyTraits, Alloc>;
using slot_type = typename PolicyTraits::slot_type;
-
- public:
- using key_type = typename Policy::key_type;
- using mapped_type = typename Policy::mapped_type;
-
- constexpr node_handle() {}
-
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+
+ constexpr node_handle() {}
+
// When C++17 is available, we can use std::launder to provide mutable
// access to the key. Otherwise, we provide const access.
auto key() const
-> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
return PolicyTraits::mutable_key(this->slot());
- }
-
- mapped_type& mapped() const {
- return PolicyTraits::value(&PolicyTraits::element(this->slot()));
- }
-
- private:
- friend struct CommonAccess;
-
- using Base::Base;
-};
-
-// Provide access to non-public node-handle functions.
-struct CommonAccess {
- template <typename Node>
- static auto GetSlot(const Node& node) -> decltype(node.slot()) {
- return node.slot();
- }
-
- template <typename Node>
+ }
+
+ mapped_type& mapped() const {
+ return PolicyTraits::value(&PolicyTraits::element(this->slot()));
+ }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// Provide access to non-public node-handle functions.
+struct CommonAccess {
+ template <typename Node>
+ static auto GetSlot(const Node& node) -> decltype(node.slot()) {
+ return node.slot();
+ }
+
+ template <typename Node>
static void Destroy(Node* node) {
node->destroy();
}
template <typename Node>
- static void Reset(Node* node) {
- node->reset();
- }
-
- template <typename T, typename... Args>
- static T Transfer(Args&&... args) {
- return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
- }
-
- template <typename T, typename... Args>
- static T Move(Args&&... args) {
- return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
- }
-};
-
-// Implement the insert_return_type<> concept of C++17.
-template <class Iterator, class NodeType>
-struct InsertReturnType {
- Iterator position;
- bool inserted;
- NodeType node;
-};
-
-} // namespace container_internal
+ static void Reset(Node* node) {
+ node->reset();
+ }
+
+ template <typename T, typename... Args>
+ static T Transfer(Args&&... args) {
+ return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ static T Move(Args&&... args) {
+ return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ }
+};
+
+// Implement the insert_return_type<> concept of C++17.
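+// For example (illustrative only, with a hypothetical `set` and `node`):
+//
+//   auto res = set.insert(std::move(node));
+//   if (!res.inserted) {
+//     // Insertion failed; `res.node` still owns the element.
+//   }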
+template <class Iterator, class NodeType>
+struct InsertReturnType {
+ Iterator position;
+ bool inserted;
+ NodeType node;
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h b/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h
index 5ebe164942..1c41d93c76 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h
@@ -1,179 +1,179 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Helper class to perform the Empty Base Optimization.
-// Ts can contain classes and non-classes, empty or not. For the ones that
-// are empty classes, we perform the optimization. If all types in Ts are empty
-// classes, then CompressedTuple<Ts...> is itself an empty class.
-//
-// To access the members, use the member get<N>() function.
-//
-// Eg:
-// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
-// t3);
-// assert(value.get<0>() == 7);
-// T1& t1 = value.get<1>();
-// const T2& t2 = value.get<2>();
-// ...
-//
-// https://en.cppreference.com/w/cpp/language/ebo
-
-#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
-#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
-
-#include <initializer_list>
-#include <tuple>
-#include <type_traits>
-#include <utility>
-
-#include "absl/utility/utility.h"
-
-#if defined(_MSC_VER) && !defined(__NVCC__)
-// We need to mark these classes with this declspec to ensure that the empty
-// base optimization in CompressedTuple actually happens.
-#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
-#else
-#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
-#endif
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Helper class to perform the Empty Base Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are empty
+// classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member get<N>() function.
+//
+// Eg:
+// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/utility/utility.h"
+
+#if defined(_MSC_VER) && !defined(__NVCC__)
+// We need to mark these classes with this declspec to ensure that the empty
+// base optimization in CompressedTuple actually happens.
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
+#else
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+#endif
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <typename... Ts>
-class CompressedTuple;
-
-namespace internal_compressed_tuple {
-
-template <typename D, size_t I>
-struct Elem;
-template <typename... B, size_t I>
-struct Elem<CompressedTuple<B...>, I>
- : std::tuple_element<I, std::tuple<B...>> {};
-template <typename D, size_t I>
-using ElemT = typename Elem<D, I>::type;
-
-// Use the __is_final intrinsic if available. Where it's not available, classes
-// declared with the 'final' specifier cannot be used as CompressedTuple
-// elements.
-// TODO(sbenza): Replace this with std::is_final in C++14.
-template <typename T>
-constexpr bool IsFinal() {
-#if defined(__clang__) || defined(__GNUC__)
- return __is_final(T);
-#else
- return false;
-#endif
-}
-
-// We can't use EBCO on other CompressedTuples because that would mean that we
-// derive from multiple Storage<> instantiations with the same I parameter,
-// and potentially from multiple identical Storage<> instantiations. So anytime
-// we use type inheritance rather than encapsulation, we mark
-// CompressedTupleImpl, to make this easy to detect.
-struct uses_inheritance {};
-
-template <typename T>
-constexpr bool ShouldUseBase() {
- return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
- !std::is_base_of<uses_inheritance, T>::value;
-}
-
-// The storage class provides two specializations:
-// - For empty classes, it stores T as a base class.
-// - For everything else, it stores T as a member.
-template <typename T, size_t I,
-#if defined(_MSC_VER)
- bool UseBase =
- ShouldUseBase<typename std::enable_if<true, T>::type>()>
-#else
- bool UseBase = ShouldUseBase<T>()>
-#endif
-struct Storage {
- T value;
- constexpr Storage() = default;
- template <typename V>
- explicit constexpr Storage(absl::in_place_t, V&& v)
- : value(absl::forward<V>(v)) {}
- constexpr const T& get() const& { return value; }
- T& get() & { return value; }
- constexpr const T&& get() const&& { return absl::move(*this).value; }
- T&& get() && { return std::move(*this).value; }
-};
-
-template <typename T, size_t I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
- constexpr Storage() = default;
-
- template <typename V>
- explicit constexpr Storage(absl::in_place_t, V&& v)
- : T(absl::forward<V>(v)) {}
-
- constexpr const T& get() const& { return *this; }
- T& get() & { return *this; }
- constexpr const T&& get() const&& { return absl::move(*this); }
- T&& get() && { return std::move(*this); }
-};
-
-template <typename D, typename I, bool ShouldAnyUseBase>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
-
-template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
- CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
- // We use the dummy identity function through std::integral_constant to
- // convince MSVC of accepting and expanding I in that context. Without it
- // you would get:
- // error C3548: 'I': parameter pack cannot be used in this context
- : uses_inheritance,
- Storage<Ts, std::integral_constant<size_t, I>::value>... {
- constexpr CompressedTupleImpl() = default;
- template <typename... Vs>
- explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
- : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
- friend CompressedTuple<Ts...>;
-};
-
-template <typename... Ts, size_t... I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
- CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
- // We use the dummy identity function as above...
- : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
- constexpr CompressedTupleImpl() = default;
- template <typename... Vs>
- explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
- : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
- friend CompressedTuple<Ts...>;
-};
-
-std::false_type Or(std::initializer_list<std::false_type>);
-std::true_type Or(std::initializer_list<bool>);
-
-// MSVC requires this to be done separately rather than within the declaration
-// of CompressedTuple below.
-template <typename... Ts>
-constexpr bool ShouldAnyUseBase() {
- return decltype(
- Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
-}
-
-template <typename T, typename V>
+namespace container_internal {
+
+template <typename... Ts>
+class CompressedTuple;
+
+namespace internal_compressed_tuple {
+
+template <typename D, size_t I>
+struct Elem;
+template <typename... B, size_t I>
+struct Elem<CompressedTuple<B...>, I>
+ : std::tuple_element<I, std::tuple<B...>> {};
+template <typename D, size_t I>
+using ElemT = typename Elem<D, I>::type;
+
+// Use the __is_final intrinsic if available. Where it's not available, classes
+// declared with the 'final' specifier cannot be used as CompressedTuple
+// elements.
+// TODO(sbenza): Replace this with std::is_final in C++14.
+template <typename T>
+constexpr bool IsFinal() {
+#if defined(__clang__) || defined(__GNUC__)
+ return __is_final(T);
+#else
+ return false;
+#endif
+}
+
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So anytime
+// we use type inheritance rather than encapsulation, we mark
+// CompressedTupleImpl, to make this easy to detect.
+struct uses_inheritance {};
+
+template <typename T>
+constexpr bool ShouldUseBase() {
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ !std::is_base_of<uses_inheritance, T>::value;
+}
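+
+// For example (illustrative only), given an empty, non-final comparator:
+//
+//   struct Less { bool operator()(int a, int b) const { return a < b; } };
+//   static_assert(ShouldUseBase<Less>(), "stored as an empty base");
+//   static_assert(!ShouldUseBase<int>(), "non-class types are stored inline");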
+
+// The storage class provides two specializations:
+// - For empty classes, it stores T as a base class.
+// - For everything else, it stores T as a member.
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+ bool UseBase =
+ ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+ bool UseBase = ShouldUseBase<T>()>
+#endif
+struct Storage {
+ T value;
+ constexpr Storage() = default;
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : value(absl::forward<V>(v)) {}
+ constexpr const T& get() const& { return value; }
+ T& get() & { return value; }
+ constexpr const T&& get() const&& { return absl::move(*this).value; }
+ T&& get() && { return std::move(*this).value; }
+};
+
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
+ constexpr Storage() = default;
+
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : T(absl::forward<V>(v)) {}
+
+ constexpr const T& get() const& { return *this; }
+ T& get() & { return *this; }
+ constexpr const T&& get() const&& { return absl::move(*this); }
+ T&& get() && { return std::move(*this); }
+};
+
+template <typename D, typename I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
+
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
+ // We use the dummy identity function through std::integral_constant to
+ // convince MSVC of accepting and expanding I in that context. Without it
+ // you would get:
+ // error C3548: 'I': parameter pack cannot be used in this context
+ : uses_inheritance,
+ Storage<Ts, std::integral_constant<size_t, I>::value>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+ // We use the dummy identity function as above...
+ : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+ return decltype(
+ Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
+template <typename T, typename V>
using TupleElementMoveConstructible =
typename std::conditional<std::is_reference<T>::value,
std::is_convertible<V, T>,
std::is_constructible<T, V&&>>::type;
-
+
template <bool SizeMatches, class T, class... Vs>
struct TupleMoveConstructible : std::false_type {};
@@ -197,94 +197,94 @@ struct TupleItemsMoveConstructible
sizeof...(Vs),
T, Vs...>::value> {};
-} // namespace internal_compressed_tuple
-
-// Helper class to perform the Empty Base Class Optimization.
-// Ts can contain classes and non-classes, empty or not. For the ones that
-// are empty classes, we perform the optimization. If all types in Ts are
-// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
-// does not apply when one or more of those empty classes is itself an empty
-// CompressedTuple.)
-//
-// To access the members, use the member .get<N>() function.
-//
-// Eg:
-// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
-// t3);
-// assert(value.get<0>() == 7);
-// T1& t1 = value.get<1>();
-// const T2& t2 = value.get<2>();
-// ...
-//
-// https://en.cppreference.com/w/cpp/language/ebo
-template <typename... Ts>
-class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
- : private internal_compressed_tuple::CompressedTupleImpl<
- CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
- internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
- private:
- template <int I>
- using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
-
- template <int I>
- using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
-
- public:
- // There seems to be a bug in MSVC in which using '=default' here will
- // cause the compiler to ignore the body of other constructors. The work-
- // around is to explicitly implement the default constructor.
-#if defined(_MSC_VER)
- constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
-#else
- constexpr CompressedTuple() = default;
-#endif
- explicit constexpr CompressedTuple(const Ts&... base)
- : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
-
+} // namespace internal_compressed_tuple
+
+// Helper class to perform the Empty Base Class Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
+//
+// To access the members, use the member .get<N>() function.
+//
+// Eg:
+// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+template <typename... Ts>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
+ : private internal_compressed_tuple::CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+ internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
+ private:
+ template <int I>
+ using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+
+ template <int I>
+ using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
+ public:
+ // There seems to be a bug in MSVC in which using '=default' here will
+ // cause the compiler to ignore the body of other constructors. The work-
+ // around is to explicitly implement the default constructor.
+#if defined(_MSC_VER)
+ constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
+#else
+ constexpr CompressedTuple() = default;
+#endif
+ explicit constexpr CompressedTuple(const Ts&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
+
template <typename First, typename... Vs,
- absl::enable_if_t<
- absl::conjunction<
- // Ensure we are not hiding default copy/move constructors.
- absl::negation<std::is_same<void(CompressedTuple),
+ absl::enable_if_t<
+ absl::conjunction<
+ // Ensure we are not hiding default copy/move constructors.
+ absl::negation<std::is_same<void(CompressedTuple),
void(absl::decay_t<First>)>>,
internal_compressed_tuple::TupleItemsMoveConstructible<
CompressedTuple<Ts...>, First, Vs...>>::value,
- bool> = true>
+ bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base)
- : CompressedTuple::CompressedTupleImpl(absl::in_place,
+ : CompressedTuple::CompressedTupleImpl(absl::in_place,
absl::forward<First>(first),
- absl::forward<Vs>(base)...) {}
-
- template <int I>
- ElemT<I>& get() & {
- return StorageT<I>::get();
- }
-
- template <int I>
- constexpr const ElemT<I>& get() const& {
+ absl::forward<Vs>(base)...) {}
+
+ template <int I>
+ ElemT<I>& get() & {
return StorageT<I>::get();
- }
-
- template <int I>
- ElemT<I>&& get() && {
- return std::move(*this).StorageT<I>::get();
- }
-
- template <int I>
- constexpr const ElemT<I>&& get() const&& {
- return absl::move(*this).StorageT<I>::get();
- }
-};
-
-// Explicit specialization for a zero-element tuple
-// (needed to avoid ambiguous overloads for the default constructor).
-template <>
-class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
-
-} // namespace container_internal
+ }
+
+ template <int I>
+ constexpr const ElemT<I>& get() const& {
+ return StorageT<I>::get();
+ }
+
+ template <int I>
+ ElemT<I>&& get() && {
+ return std::move(*this).StorageT<I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>&& get() const&& {
+ return absl::move(*this).StorageT<I>::get();
+ }
+};
+
+// Explicit specialization for a zero-element tuple
+// (needed to avoid ambiguous overloads for the default constructor).
+template <>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
-
-#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+} // namespace absl
+
+#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+
+#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h b/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
index e67529ecb6..6a27355262 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
@@ -1,33 +1,33 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
-#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
-
-#include <cassert>
-#include <cstddef>
-#include <memory>
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
#include <new>
-#include <tuple>
-#include <type_traits>
-#include <utility>
-
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
#include "absl/base/config.h"
-#include "absl/memory/memory.h"
+#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
-#include "absl/utility/utility.h"
-
+#include "absl/utility/utility.h"
+
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
@@ -36,329 +36,329 @@
#include <sanitizer/msan_interface.h>
#endif
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
+namespace container_internal {
+
template <size_t Alignment>
struct alignas(Alignment) AlignedType {};
-// Allocates at least n bytes aligned to the specified alignment.
-// Alignment must be a power of 2. It must be positive.
-//
-// Note that many allocators don't honor alignment requirements above certain
-// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
-// Allocate() doesn't apply alignment corrections. If the underlying allocator
-// returns an insufficiently aligned pointer, that's what you are going to get.
-template <size_t Alignment, class Alloc>
-void* Allocate(Alloc* alloc, size_t n) {
- static_assert(Alignment > 0, "");
- assert(n && "n must be positive");
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above certain
+// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
+// Allocate() doesn't apply alignment corrections. If the underlying allocator
+// returns an insufficiently aligned pointer, that's what you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
using M = AlignedType<Alignment>;
- using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
- using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
- assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
- "allocator does not respect alignment");
- return p;
-}
-
-// The pointer must have been previously obtained by calling
-// Allocate<Alignment>(alloc, n).
-template <size_t Alignment, class Alloc>
-void Deallocate(Alloc* alloc, void* p, size_t n) {
- static_assert(Alignment > 0, "");
- assert(n && "n must be positive");
+ assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+ "allocator does not respect alignment");
+ return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
using M = AlignedType<Alignment>;
- using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
- using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
AT::deallocate(my_mem_alloc, static_cast<M*>(p),
- (n + sizeof(M) - 1) / sizeof(M));
-}
-
-namespace memory_internal {
-
-// Constructs T into uninitialized storage pointed to by `ptr` using the args
-// specified in the tuple.
-template <class Alloc, class T, class Tuple, size_t... I>
-void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
- absl::index_sequence<I...>) {
- absl::allocator_traits<Alloc>::construct(
- *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
-}
-
-template <class T, class F>
-struct WithConstructedImplF {
- template <class... Args>
- decltype(std::declval<F>()(std::declval<T>())) operator()(
- Args&&... args) const {
- return std::forward<F>(f)(T(std::forward<Args>(args)...));
- }
- F&& f;
-};
-
-template <class T, class Tuple, size_t... Is, class F>
-decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
- Tuple&& t, absl::index_sequence<Is...>, F&& f) {
- return WithConstructedImplF<T, F>{std::forward<F>(f)}(
- std::get<Is>(std::forward<Tuple>(t))...);
-}
-
-template <class T, size_t... Is>
-auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
- -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
- return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
-}
-
-// Returns a tuple of references to the elements of the input tuple. T must be a
-// tuple.
-template <class T>
-auto TupleRef(T&& t) -> decltype(
- TupleRefImpl(std::forward<T>(t),
- absl::make_index_sequence<
- std::tuple_size<typename std::decay<T>::type>::value>())) {
- return TupleRefImpl(
- std::forward<T>(t),
- absl::make_index_sequence<
- std::tuple_size<typename std::decay<T>::type>::value>());
-}
-
-template <class F, class K, class V>
-decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
- std::declval<std::tuple<K>>(), std::declval<V>()))
-DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
- const auto& key = std::get<0>(p.first);
- return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
- std::move(p.second));
-}
-
-} // namespace memory_internal
-
-// Constructs T into uninitialized storage pointed to by `ptr` using the args
-// specified in the tuple.
-template <class Alloc, class T, class Tuple>
-void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
- memory_internal::ConstructFromTupleImpl(
- alloc, ptr, std::forward<Tuple>(t),
- absl::make_index_sequence<
- std::tuple_size<typename std::decay<Tuple>::type>::value>());
-}
-
-// Constructs T using the args specified in the tuple and calls F with the
-// constructed value.
-template <class T, class Tuple, class F>
-decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
- Tuple&& t, F&& f) {
- return memory_internal::WithConstructedImpl<T>(
- std::forward<Tuple>(t),
- absl::make_index_sequence<
- std::tuple_size<typename std::decay<Tuple>::type>::value>(),
- std::forward<F>(f));
-}
-
-// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
-// tuples with references to the passed arguments. The tuples contain
-// constructor arguments for the first and the second elements of the pair.
-//
-// The following two snippets are equivalent.
-//
-// 1. std::pair<F, S> p(args...);
-//
-// 2. auto a = PairArgs(args...);
-// std::pair<F, S> p(std::piecewise_construct,
-//                      std::move(a.first), std::move(a.second));
-inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
-template <class F, class S>
-std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
- return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
- std::forward_as_tuple(std::forward<S>(s))};
-}
-template <class F, class S>
-std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
- const std::pair<F, S>& p) {
- return PairArgs(p.first, p.second);
-}
-template <class F, class S>
-std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
- return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
-}
-template <class F, class S>
-auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
- -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
- memory_internal::TupleRef(std::forward<S>(s)))) {
- return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
- memory_internal::TupleRef(std::forward<S>(s)));
-}
-
-// A helper function for implementing apply() in map policies.
-template <class F, class... Args>
-auto DecomposePair(F&& f, Args&&... args)
- -> decltype(memory_internal::DecomposePairImpl(
- std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
- return memory_internal::DecomposePairImpl(
- std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
-}
-
-// A helper function for implementing apply() in set policies.
-template <class F, class Arg>
-decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
-DecomposeValue(F&& f, Arg&& arg) {
- const auto& key = arg;
- return std::forward<F>(f)(key, std::forward<Arg>(arg));
-}
-
-// Helper functions for asan and msan.
-inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
+ (n + sizeof(M) - 1) / sizeof(M));
+}
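+
+// A minimal usage sketch (illustrative only):
+//
+//   std::allocator<char> a;
+//   void* p = Allocate<alignof(double)>(&a, 64);
+//   // ... use the 64 bytes at p, aligned to alignof(double) ...
+//   Deallocate<alignof(double)>(&a, p, 64);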
+
+namespace memory_internal {
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple, size_t... I>
+void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
+ absl::index_sequence<I...>) {
+ absl::allocator_traits<Alloc>::construct(
+ *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class F>
+struct WithConstructedImplF {
+ template <class... Args>
+ decltype(std::declval<F>()(std::declval<T>())) operator()(
+ Args&&... args) const {
+ return std::forward<F>(f)(T(std::forward<Args>(args)...));
+ }
+ F&& f;
+};
+
+template <class T, class Tuple, size_t... Is, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
+ Tuple&& t, absl::index_sequence<Is...>, F&& f) {
+ return WithConstructedImplF<T, F>{std::forward<F>(f)}(
+ std::get<Is>(std::forward<Tuple>(t))...);
+}
+
+template <class T, size_t... Is>
+auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
+ -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+ return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
+}
+
+// Returns a tuple of references to the elements of the input tuple. T must be a
+// tuple.
+template <class T>
+auto TupleRef(T&& t) -> decltype(
+ TupleRefImpl(std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>())) {
+ return TupleRefImpl(
+ std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>());
+}
+
+template <class F, class K, class V>
+decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(), std::declval<V>()))
+DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
+ const auto& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+}
+
+} // namespace memory_internal
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple>
+void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
+ memory_internal::ConstructFromTupleImpl(
+ alloc, ptr, std::forward<Tuple>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>());
+}
+
+// Constructs T using the args specified in the tuple and calls F with the
+// constructed value.
+template <class T, class Tuple, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
+ Tuple&& t, F&& f) {
+ return memory_internal::WithConstructedImpl<T>(
+ std::forward<Tuple>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>(),
+ std::forward<F>(f));
+}
+
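
A minimal usage sketch for the two helpers above, assuming std::allocator; the names `Demo`, `P`, and `slot` are illustrative only, and both calls expand the tuple into the constructor's argument list:

#include <memory>
#include <string>
#include <tuple>
#include <utility>

#include "absl/container/internal/container_memory.h"

void Demo() {
  using P = std::pair<int, std::string>;
  std::allocator<P> alloc;
  alignas(P) unsigned char storage[sizeof(P)];
  P* slot = reinterpret_cast<P*>(storage);

  // Expands to allocator_traits::construct(alloc, slot, 42, "hi").
  absl::container_internal::ConstructFromTuple(
      &alloc, slot, std::forward_as_tuple(42, "hi"));
  slot->~P();

  // Builds a temporary P from the tuple and hands it to the callback.
  absl::container_internal::WithConstructed<P>(
      std::forward_as_tuple(7, "seven"), [](P p) { return p.first; });
}
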
+// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
+// tuples with references to the passed arguments. The tuples contain
+// constructor arguments for the first and the second elements of the pair.
+//
+// The following two snippets are equivalent.
+//
+// 1. std::pair<F, S> p(args...);
+//
+// 2. auto a = PairArgs(args...);
+// std::pair<F, S> p(std::piecewise_construct,
+// std::move(a.first), std::move(a.second));
+inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
+ return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
+ std::forward_as_tuple(std::forward<S>(s))};
+}
+template <class F, class S>
+std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
+ const std::pair<F, S>& p) {
+ return PairArgs(p.first, p.second);
+}
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
+ return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
+}
+template <class F, class S>
+auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
+ -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)))) {
+ return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)));
+}
+
+// A helper function for implementing apply() in map policies.
+template <class F, class... Args>
+auto DecomposePair(F&& f, Args&&... args)
+ -> decltype(memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
+ return memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
+}
+
+// A helper function for implementing apply() in set policies.
+template <class F, class Arg>
+decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
+DecomposeValue(F&& f, Arg&& arg) {
+ const auto& key = arg;
+ return std::forward<F>(f)(key, std::forward<Arg>(arg));
+}
+
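
A sketch of how a map policy's apply() would use these helpers; `ProbeWithKey` and `Demo` are hypothetical names. DecomposePair hands the callback the key plus piecewise construction arguments, so a table can hash and probe before committing to constructing the pair:

#include <string>
#include <tuple>
#include <utility>

#include "absl/container/internal/container_memory.h"

// Hypothetical callback matching DecomposePairImpl's invocation:
// f(key, piecewise_construct, key_tuple, value_tuple).
struct ProbeWithKey {
  template <class K, class... Ts>
  int operator()(const K& key, std::piecewise_construct_t, Ts&&...) const {
    // `key` is usable for lookup; Ts... construct the pair only if needed.
    return 0;
  }
};

void Demo() {
  // Decomposes the argument list of map.emplace(1, "one").
  absl::container_internal::DecomposePair(ProbeWithKey{}, 1,
                                          std::string("one"));

  // Set-side counterpart: the key is passed once as a const reference for
  // lookup and once forwarded for construction.
  absl::container_internal::DecomposeValue(
      [](const int& key, int&& v) { return key == v; }, 5);
}
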
+// Helper functions for asan and msan.
+inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
- ASAN_POISON_MEMORY_REGION(m, s);
-#endif
+ ASAN_POISON_MEMORY_REGION(m, s);
+#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
- __msan_poison(m, s);
-#endif
- (void)m;
- (void)s;
-}
-
-inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
+ __msan_poison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
- ASAN_UNPOISON_MEMORY_REGION(m, s);
-#endif
+ ASAN_UNPOISON_MEMORY_REGION(m, s);
+#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
- __msan_unpoison(m, s);
-#endif
- (void)m;
- (void)s;
-}
-
-template <typename T>
-inline void SanitizerPoisonObject(const T* object) {
- SanitizerPoisonMemoryRegion(object, sizeof(T));
-}
-
-template <typename T>
-inline void SanitizerUnpoisonObject(const T* object) {
- SanitizerUnpoisonMemoryRegion(object, sizeof(T));
-}
-
-namespace memory_internal {
-
-// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
-// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
-// offsetof(Pair, second) respectively. Otherwise they are -1.
-//
-// The purpose of OffsetOf is to avoid calling offsetof() on a
-// non-standard-layout type, which is non-portable.
-template <class Pair, class = std::true_type>
-struct OffsetOf {
+ __msan_unpoison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+template <typename T>
+inline void SanitizerPoisonObject(const T* object) {
+ SanitizerPoisonMemoryRegion(object, sizeof(T));
+}
+
+template <typename T>
+inline void SanitizerUnpoisonObject(const T* object) {
+ SanitizerUnpoisonMemoryRegion(object, sizeof(T));
+}
+
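
A sketch of the intended usage, with hypothetical helper names: containers poison a slot's bytes while it is vacant so ASan/MSan flag reads of destroyed elements, and unpoison before constructing into it again. In non-sanitizer builds both wrappers compile to no-ops:

#include <new>
#include <utility>

#include "absl/container/internal/container_memory.h"

template <class T>
void DestroyAndPoison(T* slot) {
  slot->~T();
  absl::container_internal::SanitizerPoisonObject(slot);
}

template <class T, class... Args>
T* UnpoisonAndConstruct(T* slot, Args&&... args) {
  absl::container_internal::SanitizerUnpoisonObject(slot);
  return new (slot) T(std::forward<Args>(args)...);
}
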
+namespace memory_internal {
+
+// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
+// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
+// offsetof(Pair, second) respectively. Otherwise they are -1.
+//
+// The purpose of OffsetOf is to avoid calling offsetof() on a
+// non-standard-layout type, which is non-portable.
+template <class Pair, class = std::true_type>
+struct OffsetOf {
static constexpr size_t kFirst = static_cast<size_t>(-1);
static constexpr size_t kSecond = static_cast<size_t>(-1);
-};
-
-template <class Pair>
-struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
- static constexpr size_t kFirst = offsetof(Pair, first);
- static constexpr size_t kSecond = offsetof(Pair, second);
-};
-
-template <class K, class V>
-struct IsLayoutCompatible {
- private:
- struct Pair {
- K first;
- V second;
- };
-
- // Is P layout-compatible with Pair?
- template <class P>
- static constexpr bool LayoutCompatible() {
- return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
- alignof(P) == alignof(Pair) &&
- memory_internal::OffsetOf<P>::kFirst ==
- memory_internal::OffsetOf<Pair>::kFirst &&
- memory_internal::OffsetOf<P>::kSecond ==
- memory_internal::OffsetOf<Pair>::kSecond;
- }
-
- public:
- // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
- // then it is safe to store them in a union and read from either.
- static constexpr bool value = std::is_standard_layout<K>() &&
- std::is_standard_layout<Pair>() &&
- memory_internal::OffsetOf<Pair>::kFirst == 0 &&
- LayoutCompatible<std::pair<K, V>>() &&
- LayoutCompatible<std::pair<const K, V>>();
-};
-
-} // namespace memory_internal
-
-// The internal storage type for key-value containers like flat_hash_map.
-//
-// It is convenient for the value_type of a flat_hash_map<K, V> to be
-// pair<const K, V>; the "const K" prevents accidental modification of the key
-// when dealing with the reference returned from find() and similar methods.
-// However, this creates other problems; we want to be able to emplace(K, V)
-// efficiently with move operations, and similarly be able to move a
-// pair<K, V> in insert().
-//
-// The solution is this union, which aliases the const and non-const versions
-// of the pair. This also allows flat_hash_map<const K, V> to work, even though
-// that has the same efficiency issues with move in emplace() and insert() -
-// but people do it anyway.
-//
-// If kMutableKeys is false, only the value member can be accessed.
-//
-// If kMutableKeys is true, key can be accessed through all slots while value
-// and mutable_value must be accessed only via INITIALIZED slots. Slots are
-// created and destroyed via mutable_value so that the key can be moved later.
-//
-// Accessing one of the union fields while the other is active is safe as
-// long as they are layout-compatible, which is guaranteed by the definition of
-// kMutableKeys. For C++11, the relevant section of the standard is
-// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
-template <class K, class V>
-union map_slot_type {
- map_slot_type() {}
- ~map_slot_type() = delete;
- using value_type = std::pair<const K, V>;
+};
+
+template <class Pair>
+struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
+ static constexpr size_t kFirst = offsetof(Pair, first);
+ static constexpr size_t kSecond = offsetof(Pair, second);
+};
+
+template <class K, class V>
+struct IsLayoutCompatible {
+ private:
+ struct Pair {
+ K first;
+ V second;
+ };
+
+ // Is P layout-compatible with Pair?
+ template <class P>
+ static constexpr bool LayoutCompatible() {
+ return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
+ alignof(P) == alignof(Pair) &&
+ memory_internal::OffsetOf<P>::kFirst ==
+ memory_internal::OffsetOf<Pair>::kFirst &&
+ memory_internal::OffsetOf<P>::kSecond ==
+ memory_internal::OffsetOf<Pair>::kSecond;
+ }
+
+ public:
+ // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
+ // then it is safe to store them in a union and read from either.
+ static constexpr bool value = std::is_standard_layout<K>() &&
+ std::is_standard_layout<Pair>() &&
+ memory_internal::OffsetOf<Pair>::kFirst == 0 &&
+ LayoutCompatible<std::pair<K, V>>() &&
+ LayoutCompatible<std::pair<const K, V>>();
+};
+
+} // namespace memory_internal
+
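
An illustrative check of what the traits compute; on mainstream standard libraries std::pair of two standard-layout types is itself standard-layout, so the first value below is typically true, while a non-standard-layout pair falls back to the -1 offsets (`Virt` and `kCompatible` are names introduced here for illustration):

#include <cstddef>
#include <utility>

#include "absl/container/internal/container_memory.h"

namespace mi = absl::container_internal::memory_internal;

// Typically true: pair<int, double> and pair<const int, double> share a
// layout, so map_slot_type<int, double> (below) may alias them.
constexpr bool kCompatible = mi::IsLayoutCompatible<int, double>::value;

// A virtual destructor makes the pair non-standard-layout, so the primary
// OffsetOf template is selected and offsetof() is never evaluated.
struct Virt { virtual ~Virt() = default; };
static_assert(mi::OffsetOf<std::pair<Virt, int>>::kFirst ==
                  static_cast<size_t>(-1),
              "non-standard-layout pairs report -1 offsets");
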
+// The internal storage type for key-value containers like flat_hash_map.
+//
+// It is convenient for the value_type of a flat_hash_map<K, V> to be
+// pair<const K, V>; the "const K" prevents accidental modification of the key
+// when dealing with the reference returned from find() and similar methods.
+// However, this creates other problems; we want to be able to emplace(K, V)
+// efficiently with move operations, and similarly be able to move a
+// pair<K, V> in insert().
+//
+// The solution is this union, which aliases the const and non-const versions
+// of the pair. This also allows flat_hash_map<const K, V> to work, even though
+// that has the same efficiency issues with move in emplace() and insert() -
+// but people do it anyway.
+//
+// If kMutableKeys is false, only the value member can be accessed.
+//
+// If kMutableKeys is true, key can be accessed through all slots while value
+// and mutable_value must be accessed only via INITIALIZED slots. Slots are
+// created and destroyed via mutable_value so that the key can be moved later.
+//
+// Accessing one of the union fields while the other is active is safe as
+// long as they are layout-compatible, which is guaranteed by the definition of
+// kMutableKeys. For C++11, the relevant section of the standard is
+// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
+template <class K, class V>
+union map_slot_type {
+ map_slot_type() {}
+ ~map_slot_type() = delete;
+ using value_type = std::pair<const K, V>;
using mutable_value_type =
std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
-
- value_type value;
- mutable_value_type mutable_value;
+
+ value_type value;
+ mutable_value_type mutable_value;
absl::remove_const_t<K> key;
-};
-
-template <class K, class V>
-struct map_slot_policy {
- using slot_type = map_slot_type<K, V>;
- using value_type = std::pair<const K, V>;
- using mutable_value_type = std::pair<K, V>;
-
- private:
- static void emplace(slot_type* slot) {
- // The construction of the union doesn't do anything at runtime, but it
- // allows us to access its members without violating aliasing rules.
- new (slot) slot_type;
- }
- // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
- // or the other via slot_type. We are also free to access the key via
- // slot_type::key in this case.
- using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
-
- public:
- static value_type& element(slot_type* slot) { return slot->value; }
- static const value_type& element(const slot_type* slot) {
- return slot->value;
- }
-
+};
+
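
A sketch of the aliasing pattern the comment above describes, with the illustrative name `SlotDemo`; the slot is built through `mutable_value` and read through the const-key view, which is legal exactly when the two pair types are layout-compatible:

#include <new>
#include <string>
#include <utility>

#include "absl/container/internal/container_memory.h"

using Slot = absl::container_internal::map_slot_type<int, std::string>;

void SlotDemo(Slot* slot) {
  new (slot) Slot;  // no-op at runtime; makes member access well-defined
  new (&slot->mutable_value) std::pair<int, std::string>(1, "one");

  // Safe when kMutableKeys holds: both pair types share a layout.
  const std::pair<const int, std::string>& view = slot->value;
  (void)view;

  slot->mutable_value.~pair();  // destroy through the member we constructed
}
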
+template <class K, class V>
+struct map_slot_policy {
+ using slot_type = map_slot_type<K, V>;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ private:
+ static void emplace(slot_type* slot) {
+ // The construction of the union doesn't do anything at runtime, but it
+ // allows us to access its members without violating aliasing rules.
+ new (slot) slot_type;
+ }
+ // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
+ // or the other via slot_type. We are also free to access the key via
+ // slot_type::key in this case.
+ using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
+
+ public:
+ static value_type& element(slot_type* slot) { return slot->value; }
+ static const value_type& element(const slot_type* slot) {
+ return slot->value;
+ }
+
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
@@ -373,88 +373,88 @@ struct map_slot_policy {
static const K& mutable_key(slot_type* slot) { return key(slot); }
#endif
- static const K& key(const slot_type* slot) {
- return kMutableKeys::value ? slot->key : slot->value.first;
- }
-
- template <class Allocator, class... Args>
- static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
- emplace(slot);
- if (kMutableKeys::value) {
- absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
- std::forward<Args>(args)...);
- } else {
- absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
- std::forward<Args>(args)...);
- }
- }
-
- // Construct this slot by moving from another slot.
- template <class Allocator>
- static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
- emplace(slot);
- if (kMutableKeys::value) {
- absl::allocator_traits<Allocator>::construct(
- *alloc, &slot->mutable_value, std::move(other->mutable_value));
- } else {
- absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
- std::move(other->value));
- }
- }
-
- template <class Allocator>
- static void destroy(Allocator* alloc, slot_type* slot) {
- if (kMutableKeys::value) {
- absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
- } else {
- absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
- }
- }
-
- template <class Allocator>
- static void transfer(Allocator* alloc, slot_type* new_slot,
- slot_type* old_slot) {
- emplace(new_slot);
- if (kMutableKeys::value) {
- absl::allocator_traits<Allocator>::construct(
- *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
- } else {
- absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
- std::move(old_slot->value));
- }
- destroy(alloc, old_slot);
- }
-
- template <class Allocator>
- static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
- if (kMutableKeys::value) {
- using std::swap;
- swap(a->mutable_value, b->mutable_value);
- } else {
- value_type tmp = std::move(a->value);
- absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
- std::move(b->value));
- absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
- std::move(tmp));
- }
- }
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
- if (kMutableKeys::value) {
- dest->mutable_value = std::move(src->mutable_value);
- } else {
- absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
- std::move(src->value));
- }
- }
-};
-
-} // namespace container_internal
+ static const K& key(const slot_type* slot) {
+ return kMutableKeys::value ? slot->key : slot->value.first;
+ }
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
+ std::forward<Args>(args)...);
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::forward<Args>(args)...);
+ }
+ }
+
+ // Construct this slot by moving from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(
+ *alloc, &slot->mutable_value, std::move(other->mutable_value));
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::move(other->value));
+ }
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
+ } else {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
+ }
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ emplace(new_slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(
+ *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
+ std::move(old_slot->value));
+ }
+ destroy(alloc, old_slot);
+ }
+
+ template <class Allocator>
+ static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
+ if (kMutableKeys::value) {
+ using std::swap;
+ swap(a->mutable_value, b->mutable_value);
+ } else {
+ value_type tmp = std::move(a->value);
+ absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
+ std::move(b->value));
+ absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
+ std::move(tmp));
+ }
+ }
+
+ template <class Allocator>
+ static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
+ if (kMutableKeys::value) {
+ dest->mutable_value = std::move(src->mutable_value);
+ } else {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
+ std::move(src->value));
+ }
+ }
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
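
A sketch of how a flat container might drive the policy during a rehash; `RehashOne` is a hypothetical name. transfer() move-constructs into the new slot, through `mutable_value` when keys are mutable, and destroys the old one:

#include <memory>
#include <string>
#include <utility>

#include "absl/container/internal/container_memory.h"

using Policy = absl::container_internal::map_slot_policy<int, std::string>;
using Slot = Policy::slot_type;

// Precondition: *old_slot holds a constructed element.
void RehashOne(std::allocator<char>* alloc, Slot* old_slot, Slot* new_slot) {
  Policy::transfer(alloc, new_slot, old_slot);  // old_slot is destroyed
  const std::pair<const int, std::string>& e = Policy::element(new_slot);
  (void)e;  // the element is reachable through the const-key view
  Policy::destroy(alloc, new_slot);
}
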
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/counting_allocator.h b/contrib/restricted/abseil-cpp/absl/container/internal/counting_allocator.h
index 927cf08255..9676f0e077 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/counting_allocator.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/counting_allocator.h
@@ -1,37 +1,37 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-
-#include <cstdint>
-#include <memory>
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cstdint>
+#include <memory>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// This is a stateful allocator, but the state lives outside of the
-// allocator (in whatever test is using the allocator). This is odd
-// but helps in tests where the allocator is propagated into nested
-// containers - that chain of allocators uses the same state and is
-// thus easier to query for aggregate allocation information.
-template <typename T>
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
class CountingAllocator {
- public:
+ public:
using Allocator = std::allocator<T>;
using AllocatorTraits = std::allocator_traits<Allocator>;
using value_type = typename AllocatorTraits::value_type;
@@ -39,16 +39,16 @@ class CountingAllocator {
using const_pointer = typename AllocatorTraits::const_pointer;
using size_type = typename AllocatorTraits::size_type;
using difference_type = typename AllocatorTraits::difference_type;
-
+
CountingAllocator() = default;
explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
: bytes_used_(bytes_used), instance_count_(instance_count) {}
-
- template <typename U>
- CountingAllocator(const CountingAllocator<U>& x)
+
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
: bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
-
+
pointer allocate(
size_type n,
typename AllocatorTraits::const_void_pointer hint = nullptr) {
@@ -58,16 +58,16 @@ class CountingAllocator {
*bytes_used_ += n * sizeof(T);
}
return ptr;
- }
-
- void deallocate(pointer p, size_type n) {
+ }
+
+ void deallocate(pointer p, size_type n) {
Allocator allocator;
AllocatorTraits::deallocate(allocator, p, n);
if (bytes_used_ != nullptr) {
*bytes_used_ -= n * sizeof(T);
}
- }
-
+ }
+
template <typename U, typename... Args>
void construct(U* p, Args&&... args) {
Allocator allocator;
@@ -87,28 +87,28 @@ class CountingAllocator {
}
template <typename U>
- class rebind {
- public:
- using other = CountingAllocator<U>;
- };
-
- friend bool operator==(const CountingAllocator& a,
- const CountingAllocator& b) {
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
return a.bytes_used_ == b.bytes_used_ &&
a.instance_count_ == b.instance_count_;
- }
-
- friend bool operator!=(const CountingAllocator& a,
- const CountingAllocator& b) {
- return !(a == b);
- }
-
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
int64_t* bytes_used_ = nullptr;
int64_t* instance_count_ = nullptr;
-};
-
-} // namespace container_internal
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
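
A usage sketch (`CountDemo` is an illustrative name): because the counters live outside the allocator, every copy, including rebound copies inside nested containers, updates the same integers:

#include <cassert>
#include <cstdint>
#include <vector>

#include "absl/container/internal/counting_allocator.h"

void CountDemo() {
  int64_t bytes = 0;
  absl::container_internal::CountingAllocator<int> alloc(&bytes);
  {
    std::vector<int, absl::container_internal::CountingAllocator<int>> v(
        alloc);
    v.resize(100);  // bytes now reflects the vector's heap buffer
    assert(bytes >= 100 * static_cast<int64_t>(sizeof(int)));
  }
  assert(bytes == 0);  // everything the vector allocated was returned
}
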
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
index 250e662c9d..6910956a64 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -1,83 +1,83 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Define the default Hash and Eq functions for SwissTable containers.
-//
-// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
-// functions for SwissTable containers. There are two reasons for this.
-//
-// SwissTable containers are power of 2 sized containers:
-//
-// This means they use the lower bits of the hash value to find the slot for
-// each entry. The typical hash function for integral types is the identity.
-// This is a very weak hash function for SwissTable and any power-of-2-sized
-// hashtable implementation, and it leads to excessive collisions. For
-// SwissTable we use murmur3-style mixing to reduce collisions to a minimum.
-//
-// SwissTable containers support heterogeneous lookup:
-//
-// In order to make heterogeneous lookup work, hash and equal functions must be
-// polymorphic. At the same time they have to satisfy the same requirements the
-// C++ standard imposes on hash functions and equality operators. That is:
-//
-// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
-// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
-//
-// For SwissTable containers this requirement is relaxed to allow a and b of
-// any and possibly different types. Note that like the standard the hash and
-// equal functions are still bound to T. This is important because some type U
-// can be hashed by/tested for equality differently depending on T. A notable
-// example is `const char*`. `const char*` is treated as a c-style string when
-// the hash function is hash<std::string> but as a pointer when the hash
-// function is hash<void*>.
-//
-#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
-#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
-
-#include <stdint.h>
-#include <cstddef>
-#include <memory>
-#include <string>
-#include <type_traits>
-
-#include "absl/base/config.h"
-#include "absl/hash/hash.h"
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Define the default Hash and Eq functions for SwissTable containers.
+//
+// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
+// functions for SwissTable containers. There are two reasons for this.
+//
+// SwissTable containers are power of 2 sized containers:
+//
+// This means they use the lower bits of the hash value to find the slot for
+// each entry. The typical hash function for integral types is the identity.
+// This is a very weak hash function for SwissTable and any power of 2 sized
+// hashtable implementation which will lead to excessive collisions. For
+// SwissTable we use murmur3 style mixing to reduce collisions to a minimum.
+//
+// SwissTable containers support heterogeneous lookup:
+//
+// In order to make heterogeneous lookup work, hash and equal functions must be
+// polymorphic. At the same time they have to satisfy the same requirements the
+// C++ standard imposes on hash functions and equality operators. That is:
+//
+// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any and possibly different types. Note that like the standard the hash and
+// equal functions are still bound to T. This is important because some type U
+// can be hashed by/tested for equality differently depending on T. A notable
+// example is `const char*`. `const char*` is treated as a c-style string when
+// the hash function is hash<std::string> but as a pointer when the hash
+// function is hash<void*>.
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
-#include "absl/strings/string_view.h"
-
-namespace absl {
+#include "absl/strings/string_view.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// The hash of an object of type T is computed by using absl::Hash.
-template <class T, class E = void>
-struct HashEq {
- using Hash = absl::Hash<T>;
- using Eq = std::equal_to<T>;
-};
-
-struct StringHash {
- using is_transparent = void;
-
- size_t operator()(absl::string_view v) const {
- return absl::Hash<absl::string_view>{}(v);
- }
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+ using Hash = absl::Hash<T>;
+ using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+ using is_transparent = void;
+
+ size_t operator()(absl::string_view v) const {
+ return absl::Hash<absl::string_view>{}(v);
+ }
size_t operator()(const absl::Cord& v) const {
return absl::Hash<absl::Cord>{}(v);
}
-};
-
+};
+
struct StringEq {
using is_transparent = void;
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
@@ -94,70 +94,70 @@ struct StringEq {
}
};
-// Supports heterogeneous lookup for string-like elements.
-struct StringHashEq {
- using Hash = StringHash;
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+ using Hash = StringHash;
using Eq = StringEq;
-};
-
-template <>
-struct HashEq<std::string> : StringHashEq {};
-template <>
-struct HashEq<absl::string_view> : StringHashEq {};
+};
+
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
template <>
struct HashEq<absl::Cord> : StringHashEq {};
-
-// Supports heterogeneous lookup for pointers and smart pointers.
-template <class T>
-struct HashEq<T*> {
- struct Hash {
- using is_transparent = void;
- template <class U>
- size_t operator()(const U& ptr) const {
- return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
- }
- };
- struct Eq {
- using is_transparent = void;
- template <class A, class B>
- bool operator()(const A& a, const B& b) const {
- return HashEq::ToPtr(a) == HashEq::ToPtr(b);
- }
- };
-
- private:
- static const T* ToPtr(const T* ptr) { return ptr; }
- template <class U, class D>
- static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
- return ptr.get();
- }
- template <class U>
- static const T* ToPtr(const std::shared_ptr<U>& ptr) {
- return ptr.get();
- }
-};
-
-template <class T, class D>
-struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
-template <class T>
-struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
-
-// This header's visibility is restricted. If you need to access the default
-// hasher please use the container's ::hasher alias instead.
-//
-// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
-template <class T>
-using hash_default_hash = typename container_internal::HashEq<T>::Hash;
-
-// This header's visibility is restricted. If you need to access the default
-// key equal please use the container's ::key_equal alias instead.
-//
-// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
-template <class T>
-using hash_default_eq = typename container_internal::HashEq<T>::Eq;
-
-} // namespace container_internal
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+ struct Hash {
+ using is_transparent = void;
+ template <class U>
+ size_t operator()(const U& ptr) const {
+ return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+ }
+ };
+ struct Eq {
+ using is_transparent = void;
+ template <class A, class B>
+ bool operator()(const A& a, const B& b) const {
+ return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+ }
+ };
+
+ private:
+ static const T* ToPtr(const T* ptr) { return ptr; }
+ template <class U, class D>
+ static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+ return ptr.get();
+ }
+ template <class U>
+ static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+ return ptr.get();
+ }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
+
+// This header's visibility is restricted. If you need to access the default
+// hasher please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
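
A sketch of the heterogeneous lookup these defaults enable (`Contains` is an illustrative name): a set keyed on std::string can be probed with a string_view, or a const char* that converts to one, without building a temporary std::string:

#include <string>

#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"

bool Contains(const absl::flat_hash_set<std::string>& set,
              absl::string_view key) {
  // StringHash/StringEq are transparent, so find() accepts string_view.
  return set.find(key) != set.end();
}
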
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
index f1f555a5c1..99318a2872 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
@@ -1,160 +1,160 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Generates random values for testing. Specialized only for the few types we
-// care about.
-
-#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
-#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
-
-#include <stdint.h>
-
-#include <algorithm>
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates random values for testing. Specialized only for the few types we
+// care about.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+
+#include <stdint.h>
+
+#include <algorithm>
#include <cassert>
-#include <iosfwd>
-#include <random>
-#include <tuple>
-#include <type_traits>
-#include <utility>
+#include <iosfwd>
+#include <random>
+#include <tuple>
+#include <type_traits>
+#include <utility>
#include <vector>
-
-#include "absl/container/internal/hash_policy_testing.h"
-#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
-#include "absl/strings/string_view.h"
-
-namespace absl {
+
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-namespace hash_internal {
-namespace generator_internal {
-
-template <class Container, class = void>
-struct IsMap : std::false_type {};
-
-template <class Map>
-struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
-
-} // namespace generator_internal
-
-std::mt19937_64* GetSharedRng();
-
-enum Enum {
- kEnumEmpty,
- kEnumDeleted,
-};
-
-enum class EnumClass : uint64_t {
- kEmpty,
- kDeleted,
-};
-
-inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
- return o << static_cast<uint64_t>(ec);
-}
-
-template <class T, class E = void>
-struct Generator;
-
-template <class T>
-struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
- T operator()() const {
- std::uniform_int_distribution<T> dist;
- return dist(*GetSharedRng());
- }
-};
-
-template <>
-struct Generator<Enum> {
- Enum operator()() const {
- std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
- dist;
- while (true) {
- auto variate = dist(*GetSharedRng());
- if (variate != kEnumEmpty && variate != kEnumDeleted)
- return static_cast<Enum>(variate);
- }
- }
-};
-
-template <>
-struct Generator<EnumClass> {
- EnumClass operator()() const {
- std::uniform_int_distribution<
- typename std::underlying_type<EnumClass>::type>
- dist;
- while (true) {
- EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
- if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
- return static_cast<EnumClass>(variate);
- }
- }
-};
-
-template <>
-struct Generator<std::string> {
- std::string operator()() const;
-};
-
-template <>
-struct Generator<absl::string_view> {
- absl::string_view operator()() const;
-};
-
-template <>
-struct Generator<NonStandardLayout> {
- NonStandardLayout operator()() const {
- return NonStandardLayout(Generator<std::string>()());
- }
-};
-
-template <class K, class V>
-struct Generator<std::pair<K, V>> {
- std::pair<K, V> operator()() const {
- return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
- Generator<typename std::decay<V>::type>()());
- }
-};
-
-template <class... Ts>
-struct Generator<std::tuple<Ts...>> {
- std::tuple<Ts...> operator()() const {
- return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
- }
-};
-
-template <class T>
-struct Generator<std::unique_ptr<T>> {
- std::unique_ptr<T> operator()() const {
- return absl::make_unique<T>(Generator<T>()());
- }
-};
-
-template <class U>
-struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
- decltype(std::declval<U&>().value())>>
- : Generator<std::pair<
- typename std::decay<decltype(std::declval<U&>().key())>::type,
- typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
-
-template <class Container>
-using GeneratedType = decltype(
- std::declval<const Generator<
- typename std::conditional<generator_internal::IsMap<Container>::value,
- typename Container::value_type,
- typename Container::key_type>::type>&>()());
-
+namespace container_internal {
+namespace hash_internal {
+namespace generator_internal {
+
+template <class Container, class = void>
+struct IsMap : std::false_type {};
+
+template <class Map>
+struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
+
+} // namespace generator_internal
+
+std::mt19937_64* GetSharedRng();
+
+enum Enum {
+ kEnumEmpty,
+ kEnumDeleted,
+};
+
+enum class EnumClass : uint64_t {
+ kEmpty,
+ kDeleted,
+};
+
+inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
+ return o << static_cast<uint64_t>(ec);
+}
+
+template <class T, class E = void>
+struct Generator;
+
+template <class T>
+struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
+ T operator()() const {
+ std::uniform_int_distribution<T> dist;
+ return dist(*GetSharedRng());
+ }
+};
+
+template <>
+struct Generator<Enum> {
+ Enum operator()() const {
+ std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
+ dist;
+ while (true) {
+ auto variate = dist(*GetSharedRng());
+ if (variate != kEnumEmpty && variate != kEnumDeleted)
+ return static_cast<Enum>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<EnumClass> {
+ EnumClass operator()() const {
+ std::uniform_int_distribution<
+ typename std::underlying_type<EnumClass>::type>
+ dist;
+ while (true) {
+ EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
+ if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
+ return static_cast<EnumClass>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<std::string> {
+ std::string operator()() const;
+};
+
+template <>
+struct Generator<absl::string_view> {
+ absl::string_view operator()() const;
+};
+
+template <>
+struct Generator<NonStandardLayout> {
+ NonStandardLayout operator()() const {
+ return NonStandardLayout(Generator<std::string>()());
+ }
+};
+
+template <class K, class V>
+struct Generator<std::pair<K, V>> {
+ std::pair<K, V> operator()() const {
+ return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
+ Generator<typename std::decay<V>::type>()());
+ }
+};
+
+template <class... Ts>
+struct Generator<std::tuple<Ts...>> {
+ std::tuple<Ts...> operator()() const {
+ return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
+ }
+};
+
+template <class T>
+struct Generator<std::unique_ptr<T>> {
+ std::unique_ptr<T> operator()() const {
+ return absl::make_unique<T>(Generator<T>()());
+ }
+};
+
+template <class U>
+struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
+ decltype(std::declval<U&>().value())>>
+ : Generator<std::pair<
+ typename std::decay<decltype(std::declval<U&>().key())>::type,
+ typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
+
+template <class Container>
+using GeneratedType = decltype(
+ std::declval<const Generator<
+ typename std::conditional<generator_internal::IsMap<Container>::value,
+ typename Container::value_type,
+ typename Container::key_type>::type>&>()());
+
// Naive wrapper that performs a linear search of previous values.
// Beware this is O(n^2), which is reasonable for smaller kMaxValues.
template <class T, size_t kMaxValues = 64, class E = void>
@@ -174,9 +174,9 @@ struct UniqueGenerator {
}
};
-} // namespace hash_internal
-} // namespace container_internal
+} // namespace hash_internal
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
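
A sketch of a test using these generators; `GenDemo` is an illustrative name and this header is test-only code:

#include <cassert>
#include <string>
#include <utility>

#include "absl/container/internal/hash_generator_testing.h"

void GenDemo() {
  using absl::container_internal::hash_internal::Generator;
  using absl::container_internal::hash_internal::UniqueGenerator;

  // One fresh random draw per call; the pair/tuple specializations compose
  // the element generators.
  Generator<std::pair<int, std::string>> gen;
  auto p = gen();
  (void)p;

  // UniqueGenerator rejects repeats via the linear scan described above.
  UniqueGenerator<int> unique;
  assert(unique() != unique());
}
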
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h
index 01c40d2e5c..2966c9d5c2 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_testing.h
@@ -1,184 +1,184 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Utilities to help tests verify that hash tables properly handle stateful
-// allocators and hash functions.
-
-#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
-#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
-
-#include <cstdlib>
-#include <limits>
-#include <memory>
-#include <ostream>
-#include <type_traits>
-#include <utility>
-#include <vector>
-
-#include "absl/hash/hash.h"
-#include "absl/strings/string_view.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Utilities to help tests verify that hash tables properly handle stateful
+// allocators and hash functions.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-namespace hash_testing_internal {
-
-template <class Derived>
-struct WithId {
- WithId() : id_(next_id<Derived>()) {}
- WithId(const WithId& that) : id_(that.id_) {}
- WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
- WithId& operator=(const WithId& that) {
- id_ = that.id_;
- return *this;
- }
- WithId& operator=(WithId&& that) {
- id_ = that.id_;
- that.id_ = 0;
- return *this;
- }
-
- size_t id() const { return id_; }
-
- friend bool operator==(const WithId& a, const WithId& b) {
- return a.id_ == b.id_;
- }
- friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
-
- protected:
- explicit WithId(size_t id) : id_(id) {}
-
- private:
- size_t id_;
-
- template <class T>
- static size_t next_id() {
- // 0 is reserved for the moved-from state.
- static size_t gId = 1;
- return gId++;
- }
-};
-
-} // namespace hash_testing_internal
-
-struct NonStandardLayout {
- NonStandardLayout() {}
- explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
- virtual ~NonStandardLayout() {}
-
- friend bool operator==(const NonStandardLayout& a,
- const NonStandardLayout& b) {
- return a.value == b.value;
- }
- friend bool operator!=(const NonStandardLayout& a,
- const NonStandardLayout& b) {
- return a.value != b.value;
- }
-
- template <typename H>
- friend H AbslHashValue(H h, const NonStandardLayout& v) {
- return H::combine(std::move(h), v.value);
- }
-
- std::string value;
-};
-
-struct StatefulTestingHash
- : absl::container_internal::hash_testing_internal::WithId<
- StatefulTestingHash> {
- template <class T>
- size_t operator()(const T& t) const {
- return absl::Hash<T>{}(t);
- }
-};
-
-struct StatefulTestingEqual
- : absl::container_internal::hash_testing_internal::WithId<
- StatefulTestingEqual> {
- template <class T, class U>
- bool operator()(const T& t, const U& u) const {
- return t == u;
- }
-};
-
-// It is expected that Alloc() == Alloc() for all allocators so we cannot use
-// WithId base. We need to explicitly assign ids.
-template <class T = int>
-struct Alloc : std::allocator<T> {
- using propagate_on_container_swap = std::true_type;
-
- // Using old paradigm for this to ensure compatibility.
- explicit Alloc(size_t id = 0) : id_(id) {}
-
- Alloc(const Alloc&) = default;
- Alloc& operator=(const Alloc&) = default;
-
- template <class U>
- Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
-
- template <class U>
- struct rebind {
- using other = Alloc<U>;
- };
-
- size_t id() const { return id_; }
-
- friend bool operator==(const Alloc& a, const Alloc& b) {
- return a.id_ == b.id_;
- }
- friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
-
- private:
- size_t id_ = (std::numeric_limits<size_t>::max)();
-};
-
-template <class Map>
-auto items(const Map& m) -> std::vector<
- std::pair<typename Map::key_type, typename Map::mapped_type>> {
- using std::get;
- std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
- res.reserve(m.size());
- for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
- return res;
-}
-
-template <class Set>
-auto keys(const Set& s)
- -> std::vector<typename std::decay<typename Set::key_type>::type> {
- std::vector<typename std::decay<typename Set::key_type>::type> res;
- res.reserve(s.size());
- for (const auto& v : s) res.emplace_back(v);
- return res;
-}
-
-} // namespace container_internal
+namespace container_internal {
+namespace hash_testing_internal {
+
+template <class Derived>
+struct WithId {
+ WithId() : id_(next_id<Derived>()) {}
+ WithId(const WithId& that) : id_(that.id_) {}
+ WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
+ WithId& operator=(const WithId& that) {
+ id_ = that.id_;
+ return *this;
+ }
+ WithId& operator=(WithId&& that) {
+ id_ = that.id_;
+ that.id_ = 0;
+ return *this;
+ }
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const WithId& a, const WithId& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
+
+ protected:
+ explicit WithId(size_t id) : id_(id) {}
+
+ private:
+ size_t id_;
+
+ template <class T>
+ static size_t next_id() {
+ // 0 is reserved for the moved-from state.
+ static size_t gId = 1;
+ return gId++;
+ }
+};
+
+} // namespace hash_testing_internal
+
+struct NonStandardLayout {
+ NonStandardLayout() {}
+ explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
+ virtual ~NonStandardLayout() {}
+
+ friend bool operator==(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value == b.value;
+ }
+ friend bool operator!=(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value != b.value;
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const NonStandardLayout& v) {
+ return H::combine(std::move(h), v.value);
+ }
+
+ std::string value;
+};
+
+struct StatefulTestingHash
+ : absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingHash> {
+ template <class T>
+ size_t operator()(const T& t) const {
+ return absl::Hash<T>{}(t);
+ }
+};
+
+struct StatefulTestingEqual
+ : absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingEqual> {
+ template <class T, class U>
+ bool operator()(const T& t, const U& u) const {
+ return t == u;
+ }
+};
+
+// It is expected that Alloc() == Alloc() for all allocators so we cannot use
+// WithId base. We need to explicitly assign ids.
+template <class T = int>
+struct Alloc : std::allocator<T> {
+ using propagate_on_container_swap = std::true_type;
+
+ // Using old paradigm for this to ensure compatibility.
+ explicit Alloc(size_t id = 0) : id_(id) {}
+
+ Alloc(const Alloc&) = default;
+ Alloc& operator=(const Alloc&) = default;
+
+ template <class U>
+ Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
+
+ template <class U>
+ struct rebind {
+ using other = Alloc<U>;
+ };
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const Alloc& a, const Alloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = (std::numeric_limits<size_t>::max)();
+};
+
+template <class Map>
+auto items(const Map& m) -> std::vector<
+ std::pair<typename Map::key_type, typename Map::mapped_type>> {
+ using std::get;
+ std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
+ res.reserve(m.size());
+ for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
+ return res;
+}
+
+template <class Set>
+auto keys(const Set& s)
+ -> std::vector<typename std::decay<typename Set::key_type>::type> {
+ std::vector<typename std::decay<typename Set::key_type>::type> res;
+ res.reserve(s.size());
+ for (const auto& v : s) res.emplace_back(v);
+ return res;
+}
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
-// where the unordered containers are missing certain constructors that
-// take allocator arguments. This test is defined ad-hoc for the platforms
-// we care about (notably Crosstool 17) because libstdcxx's useless
-// versioning scheme precludes a more principled solution.
-// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
-// "the unordered associative containers in <unordered_map> and <unordered_set>
-// meet the allocator-aware container requirements;"
-#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \
-( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 ))
-#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
-#else
-#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
-#endif
-
-#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+} // namespace absl
+
+// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
+// where the unordered containers are missing certain constructors that
+// take allocator arguments. This test is defined ad-hoc for the platforms
+// we care about (notably Crosstool 17) because libstdcxx's useless
+// versioning scheme precludes a more principled solution.
+// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
+// "the unordered associative containers in <unordered_map> and <unordered_set>
+// meet the allocator-aware container requirements;"
+#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \
+( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 ))
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
+#else
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
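
A sketch of what the explicit ids are for (`AllocDemo` is an illustrative name): a test can watch an allocator propagate. Because propagate_on_container_swap is true, swapping containers also swaps their unequal allocators:

#include <cassert>
#include <vector>

#include "absl/container/internal/hash_policy_testing.h"

void AllocDemo() {
  using A = absl::container_internal::Alloc<int>;
  std::vector<int, A> v1(A(1)), v2(A(2));
  v1.swap(v2);
  assert(v1.get_allocator().id() == 2);  // allocator moved with the contents
}
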
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
index 46c97b18a2..9abea32901 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
@@ -1,40 +1,40 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
-#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
-
-#include <cstddef>
-#include <memory>
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+
+#include <cstddef>
+#include <memory>
#include <new>
-#include <type_traits>
-#include <utility>
-
-#include "absl/meta/type_traits.h"
-
-namespace absl {
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// Defines how slots are initialized/destroyed/moved.
-template <class Policy, class = void>
-struct hash_policy_traits {
+namespace container_internal {
+
+// Defines how slots are initialized/destroyed/moved.
+template <class Policy, class = void>
+struct hash_policy_traits {
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;
- private:
- struct ReturnKey {
+ private:
+ struct ReturnKey {
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
@@ -51,158 +51,158 @@ struct hash_policy_traits {
return std::forward<Key>(k);
}
- // When Key=T&, we forward the lvalue reference.
- // When Key=T, we return by value to avoid a dangling reference.
- // eg, for string_hash_map.
- template <class Key, class... Args>
+ // When Key=T&, we forward the lvalue reference.
+ // When Key=T, we return by value to avoid a dangling reference.
+ // eg, for string_hash_map.
+ template <class Key, class... Args>
auto operator()(Key&& k, const Args&...) const
-> decltype(Impl(std::forward<Key>(k), 0)) {
return Impl(std::forward<Key>(k), 0);
- }
- };
-
- template <class P = Policy, class = void>
- struct ConstantIteratorsImpl : std::false_type {};
-
- template <class P>
- struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
- : P::constant_iterators {};
-
- public:
- // The actual object stored in the hash table.
- using slot_type = typename Policy::slot_type;
-
- // The argument type for insertions into the hashtable. This is different
- // from value_type for increased performance. See initializer_list constructor
- // and insert() member functions for more details.
- using init_type = typename Policy::init_type;
-
- using reference = decltype(Policy::element(std::declval<slot_type*>()));
- using pointer = typename std::remove_reference<reference>::type*;
- using value_type = typename std::remove_reference<reference>::type;
-
- // Policies can set this variable to tell raw_hash_set that all iterators
- // should be constant, even `iterator`. This is useful for set-like
- // containers.
- // Defaults to false if not provided by the policy.
- using constant_iterators = ConstantIteratorsImpl<>;
-
- // PRECONDITION: `slot` is UNINITIALIZED
- // POSTCONDITION: `slot` is INITIALIZED
- template <class Alloc, class... Args>
- static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
- Policy::construct(alloc, slot, std::forward<Args>(args)...);
- }
-
- // PRECONDITION: `slot` is INITIALIZED
- // POSTCONDITION: `slot` is UNINITIALIZED
- template <class Alloc>
- static void destroy(Alloc* alloc, slot_type* slot) {
- Policy::destroy(alloc, slot);
- }
-
- // Transfers the `old_slot` to `new_slot`. Any memory allocated by the
- // allocator inside `old_slot` to `new_slot` can be transferred.
- //
- // OPTIONAL: defaults to:
- //
- // clone(new_slot, std::move(*old_slot));
- // destroy(old_slot);
- //
- // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
- // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
- // UNINITIALIZED
- template <class Alloc>
- static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
- transfer_impl(alloc, new_slot, old_slot, 0);
- }
-
- // PRECONDITION: `slot` is INITIALIZED
- // POSTCONDITION: `slot` is INITIALIZED
- template <class P = Policy>
- static auto element(slot_type* slot) -> decltype(P::element(slot)) {
- return P::element(slot);
- }
-
- // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
- //
- // If `slot` is nullptr, returns the constant amount of memory owned by any
- // full slot or -1 if slots own variable amounts of memory.
- //
- // PRECONDITION: `slot` is INITIALIZED or nullptr
- template <class P = Policy>
- static size_t space_used(const slot_type* slot) {
- return P::space_used(slot);
- }
-
- // Provides generalized access to the key for elements, both for elements in
- // the table and for elements that have not yet been inserted (or even
- // constructed). We would like an API that allows us to say: `key(args...)`
- // but we cannot do that for all cases, so we use this more general API that
- // can be used for many things, including the following:
- //
- // - Given an element in a table, get its key.
- // - Given an element initializer, get its key.
- // - Given `emplace()` arguments, get the element key.
- //
- // Implementations of this must adhere to a very strict technical
- // specification around aliasing and consuming arguments:
- //
- // Let `value_type` be the result type of `element()` without ref- and
- // cv-qualifiers. The first argument is a functor, the rest are constructor
- // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
- // `k` is the element key, and `xs...` are the new constructor arguments for
- // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
- // `ts...`. The key won't be touched once `xs...` are used to construct an
- // element; `ts...` won't be touched at all, which allows `apply()` to consume
- // any rvalues among them.
- //
- // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
- // trigger a hard compile error unless it originates from `f`. In other words,
- // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
- // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
- //
- // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
- // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
- template <class F, class... Ts, class P = Policy>
- static auto apply(F&& f, Ts&&... ts)
- -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
- return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
- }
-
- // Returns the "key" portion of the slot.
- // Used for node handle manipulation.
- template <class P = Policy>
+ }
+ };
+
+ template <class P = Policy, class = void>
+ struct ConstantIteratorsImpl : std::false_type {};
+
+ template <class P>
+ struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
+ : P::constant_iterators {};
+
+ public:
+ // The actual object stored in the hash table.
+ using slot_type = typename Policy::slot_type;
+
+ // The argument type for insertions into the hashtable. This is different
+ // from value_type for increased performance. See initializer_list constructor
+ // and insert() member functions for more details.
+ using init_type = typename Policy::init_type;
+
+ using reference = decltype(Policy::element(std::declval<slot_type*>()));
+ using pointer = typename std::remove_reference<reference>::type*;
+ using value_type = typename std::remove_reference<reference>::type;
+
+ // Policies can set this variable to tell raw_hash_set that all iterators
+ // should be constant, even `iterator`. This is useful for set-like
+ // containers.
+ // Defaults to false if not provided by the policy.
+ using constant_iterators = ConstantIteratorsImpl<>;
+
+ // PRECONDITION: `slot` is UNINITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ Policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is UNINITIALIZED
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::destroy(alloc, slot);
+ }
+
+ // Transfers the `old_slot` to `new_slot`. Any memory allocated by the
+ // allocator inside `old_slot` to `new_slot` can be transferred.
+ //
+ // OPTIONAL: defaults to:
+ //
+ // clone(new_slot, std::move(*old_slot));
+ // destroy(old_slot);
+ //
+ // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+ // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+ // UNINITIALIZED
+ template <class Alloc>
+ static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+ transfer_impl(alloc, new_slot, old_slot, 0);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class P = Policy>
+ static auto element(slot_type* slot) -> decltype(P::element(slot)) {
+ return P::element(slot);
+ }
+
+ // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
+ //
+ // If `slot` is nullptr, returns the constant amount of memory owned by any
+ // full slot or -1 if slots own variable amounts of memory.
+ //
+ // PRECONDITION: `slot` is INITIALIZED or nullptr
+ template <class P = Policy>
+ static size_t space_used(const slot_type* slot) {
+ return P::space_used(slot);
+ }
+
+ // Provides generalized access to the key for elements, both for elements in
+ // the table and for elements that have not yet been inserted (or even
+ // constructed). We would like an API that allows us to say: `key(args...)`
+ // but we cannot do that for all cases, so we use this more general API that
+ // can be used for many things, including the following:
+ //
+ // - Given an element in a table, get its key.
+ // - Given an element initializer, get its key.
+ // - Given `emplace()` arguments, get the element key.
+ //
+ // Implementations of this must adhere to a very strict technical
+ // specification around aliasing and consuming arguments:
+ //
+ // Let `value_type` be the result type of `element()` without ref- and
+ // cv-qualifiers. The first argument is a functor, the rest are constructor
+ // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
+ // `k` is the element key, and `xs...` are the new constructor arguments for
+ // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
+ // `ts...`. The key won't be touched once `xs...` are used to construct an
+ // element; `ts...` won't be touched at all, which allows `apply()` to consume
+ // any rvalues among them.
+ //
+ // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
+ // trigger a hard compile error unless it originates from `f`. In other words,
+ // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
+ // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
+ //
+ // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
+ // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
+ template <class F, class... Ts, class P = Policy>
+ static auto apply(F&& f, Ts&&... ts)
+ -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
+ }
+
+ // Returns the "key" portion of the slot.
+ // Used for node handle manipulation.
+ template <class P = Policy>
static auto mutable_key(slot_type* slot)
- -> decltype(P::apply(ReturnKey(), element(slot))) {
- return P::apply(ReturnKey(), element(slot));
- }
-
- // Returns the "value" (as opposed to the "key") portion of the element. Used
- // by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
- template <class T, class P = Policy>
- static auto value(T* elem) -> decltype(P::value(elem)) {
- return P::value(elem);
- }
-
- private:
- // Use auto -> decltype as an enabler.
- template <class Alloc, class P = Policy>
- static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
- slot_type* old_slot, int)
- -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
- P::transfer(alloc, new_slot, old_slot);
- }
- template <class Alloc>
- static void transfer_impl(Alloc* alloc, slot_type* new_slot,
- slot_type* old_slot, char) {
- construct(alloc, new_slot, std::move(element(old_slot)));
- destroy(alloc, old_slot);
- }
-};
-
-} // namespace container_internal
+ -> decltype(P::apply(ReturnKey(), element(slot))) {
+ return P::apply(ReturnKey(), element(slot));
+ }
+
+ // Returns the "value" (as opposed to the "key") portion of the element. Used
+ // by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ private:
+ // Use auto -> decltype as an enabler.
+ template <class Alloc, class P = Policy>
+ static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, int)
+ -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
+ P::transfer(alloc, new_slot, old_slot);
+ }
+ template <class Alloc>
+ static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, char) {
+ construct(alloc, new_slot, std::move(element(old_slot)));
+ destroy(alloc, old_slot);
+ }
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
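
To make the traits contract above concrete, here is a hedged sketch of a
minimal policy that hash_policy_traits<Policy> could wrap (IntSetPolicy is a
hypothetical illustration; real policies live in the flat/node container
headers):

    #include <cstddef>
    #include <new>
    #include <utility>

    struct IntSetPolicy {
      using key_type = int;
      using slot_type = int;  // the slot stores the value inline
      using init_type = int;

      template <class Alloc, class... Args>
      static void construct(Alloc*, slot_type* slot, Args&&... args) {
        ::new (static_cast<void*>(slot)) int(std::forward<Args>(args)...);
      }
      template <class Alloc>
      static void destroy(Alloc*, slot_type*) {}  // int is trivially destructible

      static slot_type& element(slot_type* slot) { return *slot; }

      static size_t space_used(const slot_type*) { return 0; }  // owns no memory

      // Per the apply() contract documented above: pass the key plus the
      // constructor arguments to `f`.
      template <class F>
      static auto apply(F&& f, int v) -> decltype(std::forward<F>(f)(v, v)) {
        return std::forward<F>(f)(v, v);
      }
    };

With no transfer() member, hash_policy_traits falls back to the documented
default of construct-from-moved-element followed by destroy.
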
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug.h
index 19d52121d6..55c0cb2da3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug.h
@@ -1,110 +1,110 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This library provides APIs to debug the probing behavior of hash tables.
-//
-// In general, the probing behavior is a black box for users and only the
-// side effects can be measured in the form of performance differences.
-// These APIs give a glimpse on the actual behavior of the probing algorithms in
-// these hashtables given a specified hash function and a set of elements.
-//
-// The probe count distribution can be used to assess the quality of the hash
-// function for that particular hash table. Note that a hash function that
-// performs well in one hash table implementation does not necessarily performs
-// well in a different one.
-//
-// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
-// absl::{flat,node,string}_hash_{set,map}.
-
-#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
-#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
-
-#include <cstddef>
-#include <algorithm>
-#include <type_traits>
-#include <vector>
-
-#include "absl/container/internal/hashtable_debug_hooks.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse into the actual behavior of the probing algorithms
+// in these hashtables given a specified hash function and a set of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// Returns the number of probes required to lookup `key`. Returns 0 for a
-// search with no collisions. Higher values mean more hash collisions occurred;
-// however, the exact meaning of this number varies according to the container
-// type.
-template <typename C>
-size_t GetHashtableDebugNumProbes(
- const C& c, const typename C::key_type& key) {
- return absl::container_internal::hashtable_debug_internal::
- HashtableDebugAccess<C>::GetNumProbes(c, key);
-}
-
-// Gets a histogram of the number of probes for each elements in the container.
-// The sum of all the values in the vector is equal to container.size().
-template <typename C>
-std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
- std::vector<size_t> v;
- for (auto it = container.begin(); it != container.end(); ++it) {
- size_t num_probes = GetHashtableDebugNumProbes(
- container,
- absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
- v.resize((std::max)(v.size(), num_probes + 1));
- v[num_probes]++;
- }
- return v;
-}
-
-struct HashtableDebugProbeSummary {
- size_t total_elements;
- size_t total_num_probes;
- double mean;
-};
-
-// Gets a summary of the probe count distribution for the elements in the
-// container.
-template <typename C>
-HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
- auto probes = GetHashtableDebugNumProbesHistogram(container);
- HashtableDebugProbeSummary summary = {};
- for (size_t i = 0; i < probes.size(); ++i) {
- summary.total_elements += probes[i];
- summary.total_num_probes += probes[i] * i;
- }
- summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
- return summary;
-}
-
-// Returns the number of bytes requested from the allocator by the container
-// and not freed.
-template <typename C>
-size_t AllocatedByteSize(const C& c) {
- return absl::container_internal::hashtable_debug_internal::
- HashtableDebugAccess<C>::AllocatedByteSize(c);
-}
-
-// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
-// and `c.size()` is equal to `num_elements`.
-template <typename C>
-size_t LowerBoundAllocatedByteSize(size_t num_elements) {
- return absl::container_internal::hashtable_debug_internal::
- HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
-}
-
-} // namespace container_internal
+namespace container_internal {
+
+// Returns the number of probes required to lookup `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+ const C& c, const typename C::key_type& key) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+ std::vector<size_t> v;
+ for (auto it = container.begin(); it != container.end(); ++it) {
+ size_t num_probes = GetHashtableDebugNumProbes(
+ container,
+ absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+ v.resize((std::max)(v.size(), num_probes + 1));
+ v[num_probes]++;
+ }
+ return v;
+}
+
+struct HashtableDebugProbeSummary {
+ size_t total_elements;
+ size_t total_num_probes;
+ double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+ auto probes = GetHashtableDebugNumProbesHistogram(container);
+ HashtableDebugProbeSummary summary = {};
+ for (size_t i = 0; i < probes.size(); ++i) {
+ summary.total_elements += probes[i];
+ summary.total_num_probes += probes[i] * i;
+ }
+ summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+ return summary;
+}
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template <typename C>
+size_t AllocatedByteSize(const C& c) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::AllocatedByteSize(c);
+}
+
+// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
+// and `c.size()` is equal to `num_elements`.
+template <typename C>
+size_t LowerBoundAllocatedByteSize(size_t num_elements) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
+}
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
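
A hedged usage sketch of the probe-debugging entry points above (illustrative
only; ProbeStatsSketch is a hypothetical name, and std::unordered_set is used
because the default debug hooks support it):

    #include <unordered_set>
    #include <vector>

    void ProbeStatsSketch() {
      std::unordered_set<int> s;
      for (int i = 0; i < 1000; ++i) s.insert(i);
      std::vector<size_t> hist =
          absl::container_internal::GetHashtableDebugNumProbesHistogram(s);
      auto summary =
          absl::container_internal::GetHashtableDebugProbeSummary(s);
      // hist[k] counts elements found after k probes;
      // summary.mean is total_num_probes / total_elements.
      (void)summary;
    }
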
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h
index 3e9ea5954e..612f3220dc 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h
@@ -1,85 +1,85 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Provides the internal API for hashtable_debug.h.
-
-#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
-#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
-
-#include <cstddef>
-
-#include <algorithm>
-#include <type_traits>
-#include <vector>
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Provides the internal API for hashtable_debug.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+
+#include <cstddef>
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-namespace hashtable_debug_internal {
-
-// If it is a map, call get<0>().
-using std::get;
-template <typename T, typename = typename T::mapped_type>
-auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
- return get<0>(pair);
-}
-
-// If it is not a map, return the value directly.
-template <typename T>
-const typename T::key_type& GetKey(const typename T::key_type& key, char) {
- return key;
-}
-
-// Containers should specialize this to provide debug information for that
-// container.
-template <class Container, typename Enabler = void>
-struct HashtableDebugAccess {
- // Returns the number of probes required to find `key` in `c`. The "number of
- // probes" is a concept that can vary by container. Implementations should
- // return 0 when `key` was found in the minimum number of operations and
- // should increment the result for each non-trivial operation required to find
- // `key`.
- //
- // The default implementation uses the bucket api from the standard and thus
- // works for `std::unordered_*` containers.
- static size_t GetNumProbes(const Container& c,
- const typename Container::key_type& key) {
- if (!c.bucket_count()) return {};
- size_t num_probes = 0;
- size_t bucket = c.bucket(key);
- for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
- if (it == e) return num_probes;
- if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
- }
- }
-
- // Returns the number of bytes requested from the allocator by the container
- // and not freed.
- //
- // static size_t AllocatedByteSize(const Container& c);
-
- // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
- // `Container` and `c.size()` is equal to `num_elements`.
- //
- // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
-};
-
-} // namespace hashtable_debug_internal
-} // namespace container_internal
+namespace container_internal {
+namespace hashtable_debug_internal {
+
+// If it is a map, call get<0>().
+using std::get;
+template <typename T, typename = typename T::mapped_type>
+auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
+ return get<0>(pair);
+}
+
+// If it is not a map, return the value directly.
+template <typename T>
+const typename T::key_type& GetKey(const typename T::key_type& key, char) {
+ return key;
+}
+
+// Containers should specialize this to provide debug information for that
+// container.
+template <class Container, typename Enabler = void>
+struct HashtableDebugAccess {
+ // Returns the number of probes required to find `key` in `c`. The "number of
+ // probes" is a concept that can vary by container. Implementations should
+ // return 0 when `key` was found in the minimum number of operations and
+ // should increment the result for each non-trivial operation required to find
+ // `key`.
+ //
+  // The default implementation uses the bucket API from the standard and thus
+ // works for `std::unordered_*` containers.
+ static size_t GetNumProbes(const Container& c,
+ const typename Container::key_type& key) {
+ if (!c.bucket_count()) return {};
+ size_t num_probes = 0;
+ size_t bucket = c.bucket(key);
+ for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
+ if (it == e) return num_probes;
+ if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
+ }
+ }
+
+ // Returns the number of bytes requested from the allocator by the container
+ // and not freed.
+ //
+ // static size_t AllocatedByteSize(const Container& c);
+
+ // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
+ // `Container` and `c.size()` is equal to `num_elements`.
+ //
+ // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+};
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
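
The HashtableDebugAccess primary template above covers standard unordered
containers; other containers opt in by specializing it. A hedged sketch of
such a specialization (MyTable, DebugProbes, and DebugAllocatedBytes are
hypothetical names):

    namespace absl {
    ABSL_NAMESPACE_BEGIN
    namespace container_internal {
    namespace hashtable_debug_internal {

    template <class T>
    struct HashtableDebugAccess<MyTable<T>> {
      static size_t GetNumProbes(const MyTable<T>& c,
                                 const typename MyTable<T>::key_type& key) {
        return c.DebugProbes(key);  // hypothetical per-container metric
      }
      static size_t AllocatedByteSize(const MyTable<T>& c) {
        return c.DebugAllocatedBytes();  // hypothetical accessor
      }
    };

    }  // namespace hashtable_debug_internal
    }  // namespace container_internal
    ABSL_NAMESPACE_END
    }  // namespace absl
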
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index 40cce0479e..0079c5a153 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -1,190 +1,190 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/container/internal/hashtablez_sampler.h"
-
-#include <atomic>
-#include <cassert>
-#include <cmath>
-#include <functional>
-#include <limits>
-
-#include "absl/base/attributes.h"
-#include "absl/container/internal/have_sse.h"
-#include "absl/debugging/stacktrace.h"
-#include "absl/memory/memory.h"
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
#include "absl/profiling/internal/sample_recorder.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-constexpr int HashtablezInfo::kMaxStackDepth;
-
-namespace {
-ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
- false
-};
-ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
-
+namespace container_internal {
+constexpr int HashtablezInfo::kMaxStackDepth;
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
+ false
+};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
- g_exponential_biased_generator;
-#endif
-
-} // namespace
-
+ g_exponential_biased_generator;
+#endif
+
+} // namespace
+
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-
+
HashtablezSampler& GlobalHashtablezSampler() {
- static auto* sampler = new HashtablezSampler();
- return *sampler;
-}
-
+ static auto* sampler = new HashtablezSampler();
+ return *sampler;
+}
+
// TODO(bradleybear): The comments at this constructor's declaration say that the
// fields are not initialized, but this definition does initialize the fields.
// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
-HashtablezInfo::~HashtablezInfo() = default;
-
-void HashtablezInfo::PrepareForSampling() {
- capacity.store(0, std::memory_order_relaxed);
- size.store(0, std::memory_order_relaxed);
- num_erases.store(0, std::memory_order_relaxed);
+HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling() {
+ capacity.store(0, std::memory_order_relaxed);
+ size.store(0, std::memory_order_relaxed);
+ num_erases.store(0, std::memory_order_relaxed);
num_rehashes.store(0, std::memory_order_relaxed);
- max_probe_length.store(0, std::memory_order_relaxed);
- total_probe_length.store(0, std::memory_order_relaxed);
- hashes_bitwise_or.store(0, std::memory_order_relaxed);
- hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+ max_probe_length.store(0, std::memory_order_relaxed);
+ total_probe_length.store(0, std::memory_order_relaxed);
+ hashes_bitwise_or.store(0, std::memory_order_relaxed);
+ hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
hashes_bitwise_xor.store(0, std::memory_order_relaxed);
max_reserve.store(0, std::memory_order_relaxed);
-
- create_time = absl::Now();
- // The inliner makes hardcoded skip_count difficult (especially when combined
- // with LTO). We use the ability to exclude stacks by regex when encoding
- // instead.
- depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
- /* skip_count= */ 0);
-}
-
-static bool ShouldForceSampling() {
- enum ForceState {
- kDontForce,
- kForce,
- kUninitialized
- };
- ABSL_CONST_INIT static std::atomic<ForceState> global_state{
- kUninitialized};
- ForceState state = global_state.load(std::memory_order_relaxed);
- if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
-
- if (state == kUninitialized) {
+
+ create_time = absl::Now();
+ // The inliner makes hardcoded skip_count difficult (especially when combined
+ // with LTO). We use the ability to exclude stacks by regex when encoding
+ // instead.
+ depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+ /* skip_count= */ 0);
+}
+
+static bool ShouldForceSampling() {
+ enum ForceState {
+ kDontForce,
+ kForce,
+ kUninitialized
+ };
+ ABSL_CONST_INIT static std::atomic<ForceState> global_state{
+ kUninitialized};
+ ForceState state = global_state.load(std::memory_order_relaxed);
+ if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
+
+ if (state == kUninitialized) {
state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
? kForce
: kDontForce;
- global_state.store(state, std::memory_order_relaxed);
- }
- return state == kForce;
-}
-
+ global_state.store(state, std::memory_order_relaxed);
+ }
+ return state == kForce;
+}
+
HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
- if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
- *next_sample = 1;
+ if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
+ *next_sample = 1;
HashtablezInfo* result = GlobalHashtablezSampler().Register();
result->inline_element_size = inline_element_size;
return result;
- }
-
+ }
+
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- *next_sample = std::numeric_limits<int64_t>::max();
- return nullptr;
-#else
- bool first = *next_sample < 0;
+ *next_sample = std::numeric_limits<int64_t>::max();
+ return nullptr;
+#else
+ bool first = *next_sample < 0;
*next_sample = g_exponential_biased_generator.GetStride(
- g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
- // Small values of interval are equivalent to just sampling next time.
+ g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+ // Small values of interval are equivalent to just sampling next time.
ABSL_ASSERT(*next_sample >= 1);
-
- // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
- // low enough that we will start sampling in a reasonable time, so we just use
- // the default sampling rate.
- if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
-
- // We will only be negative on our first count, so we should just retry in
- // that case.
- if (first) {
- if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+
+  // g_hashtablez_enabled can be dynamically flipped, so we need to set a
+  // threshold low enough that we will start sampling in a reasonable time;
+  // we just use the default sampling rate.
+ if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+ // We will only be negative on our first count, so we should just retry in
+ // that case.
+ if (first) {
+ if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
return SampleSlow(next_sample, inline_element_size);
- }
-
+ }
+
HashtablezInfo* result = GlobalHashtablezSampler().Register();
result->inline_element_size = inline_element_size;
return result;
-#endif
-}
-
-void UnsampleSlow(HashtablezInfo* info) {
+#endif
+}
+
+void UnsampleSlow(HashtablezInfo* info) {
GlobalHashtablezSampler().Unregister(info);
-}
-
-void RecordInsertSlow(HashtablezInfo* info, size_t hash,
- size_t distance_from_desired) {
- // SwissTables probe in groups of 16, so scale this to count items probes and
- // not offset from desired.
- size_t probe_length = distance_from_desired;
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired) {
+  // SwissTables probe in groups of 16, so scale this to count probes rather
+  // than the offset from the desired slot.
+ size_t probe_length = distance_from_desired;
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
- probe_length /= 16;
-#else
- probe_length /= 8;
-#endif
-
- info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
- info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ probe_length /= 16;
+#else
+ probe_length /= 8;
+#endif
+
+ info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
- info->max_probe_length.store(
- std::max(info->max_probe_length.load(std::memory_order_relaxed),
- probe_length),
- std::memory_order_relaxed);
- info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
- info->size.fetch_add(1, std::memory_order_relaxed);
-}
-
-void SetHashtablezEnabled(bool enabled) {
- g_hashtablez_enabled.store(enabled, std::memory_order_release);
-}
-
-void SetHashtablezSampleParameter(int32_t rate) {
- if (rate > 0) {
- g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
- } else {
- ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
- static_cast<long long>(rate)); // NOLINT(runtime/int)
- }
-}
-
-void SetHashtablezMaxSamples(int32_t max) {
- if (max > 0) {
+ info->max_probe_length.store(
+ std::max(info->max_probe_length.load(std::memory_order_relaxed),
+ probe_length),
+ std::memory_order_relaxed);
+ info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+ info->size.fetch_add(1, std::memory_order_relaxed);
+}
+
+void SetHashtablezEnabled(bool enabled) {
+ g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+ if (rate > 0) {
+ g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+ static_cast<long long>(rate)); // NOLINT(runtime/int)
+ }
+}
+
+void SetHashtablezMaxSamples(int32_t max) {
+ if (max > 0) {
GlobalHashtablezSampler().SetMaxSamples(max);
- } else {
- ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
- static_cast<long long>(max)); // NOLINT(runtime/int)
- }
-}
-
-} // namespace container_internal
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
+ static_cast<long long>(max)); // NOLINT(runtime/int)
+ }
+}
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
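
A hedged configuration sketch for the setters defined above (the values are
examples, not recommendations; ConfigureHashtablezSketch is a hypothetical
name):

    void ConfigureHashtablezSketch() {
      // Turn sampling on; tables created afterwards may be sampled.
      absl::container_internal::SetHashtablezEnabled(true);
      // Mean stride of the exponentially biased sampler (default is 1 << 10).
      absl::container_internal::SetHashtablezSampleParameter(1 << 10);
      // Soft cap on the number of live samples.
      absl::container_internal::SetHashtablezMaxSamples(500);
    }
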
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 91fcdb34a3..c8b2683ac4 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -1,114 +1,114 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// File: hashtablez_sampler.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines the API for a low level library to sample hashtables
-// and collect runtime statistics about them.
-//
-// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
-// store information about a single sample.
-//
-// `Record*` methods store information into samples.
-// `Sample()` and `Unsample()` make use of a single global sampler with
-// properties controlled by the flags hashtablez_enabled,
-// hashtablez_sample_rate, and hashtablez_max_samples.
-//
-// WARNING
-//
-// Using this sampling API may cause sampled Swiss tables to use the global
-// allocator (operator `new`) in addition to any custom allocator. If you
-// are using a table in an unusual circumstance where allocation or calling a
-// linux syscall is unacceptable, this could interfere.
-//
-// This utility is internal-only. Use at your own risk.
-
-#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
-#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
-
-#include <atomic>
-#include <functional>
-#include <memory>
-#include <vector>
-
-#include "absl/base/internal/per_thread_tls.h"
-#include "absl/base/optimization.h"
-#include "absl/container/internal/have_sse.h"
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low-level library to sample hashtables
+// and collect runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
+// store information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and `Unsample()` make use of a single global sampler with
+// properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator. If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
#include "absl/profiling/internal/sample_recorder.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/utility/utility.h"
-
-namespace absl {
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// Stores information about a sampled hashtable. All mutations to this *must*
-// be made through `Record*` functions below. All reads from this *must* only
-// occur in the callback to `HashtablezSampler::Iterate`.
+namespace container_internal {
+
+// Stores information about a sampled hashtable. All mutations to this *must*
+// be made through `Record*` functions below. All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
- // Constructs the object but does not fill in any fields.
- HashtablezInfo();
- ~HashtablezInfo();
- HashtablezInfo(const HashtablezInfo&) = delete;
- HashtablezInfo& operator=(const HashtablezInfo&) = delete;
-
- // Puts the object into a clean state, fills in the logically `const` members,
- // blocking for any readers that are currently sampling the object.
- void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
-
- // These fields are mutated by the various Record* APIs and need to be
- // thread-safe.
- std::atomic<size_t> capacity;
- std::atomic<size_t> size;
- std::atomic<size_t> num_erases;
+ // Constructs the object but does not fill in any fields.
+ HashtablezInfo();
+ ~HashtablezInfo();
+ HashtablezInfo(const HashtablezInfo&) = delete;
+ HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+ // Puts the object into a clean state, fills in the logically `const` members,
+ // blocking for any readers that are currently sampling the object.
+ void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+ // These fields are mutated by the various Record* APIs and need to be
+ // thread-safe.
+ std::atomic<size_t> capacity;
+ std::atomic<size_t> size;
+ std::atomic<size_t> num_erases;
std::atomic<size_t> num_rehashes;
- std::atomic<size_t> max_probe_length;
- std::atomic<size_t> total_probe_length;
- std::atomic<size_t> hashes_bitwise_or;
- std::atomic<size_t> hashes_bitwise_and;
+ std::atomic<size_t> max_probe_length;
+ std::atomic<size_t> total_probe_length;
+ std::atomic<size_t> hashes_bitwise_or;
+ std::atomic<size_t> hashes_bitwise_and;
std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;
-
- // All of the fields below are set by `PrepareForSampling`, they must not be
- // mutated in `Record*` functions. They are logically `const` in that sense.
- // These are guarded by init_mu, but that is not externalized to clients, who
- // can only read them during `HashtablezSampler::Iterate` which will hold the
- // lock.
- static constexpr int kMaxStackDepth = 64;
- absl::Time create_time;
- int32_t depth;
- void* stack[kMaxStackDepth];
+
+ // All of the fields below are set by `PrepareForSampling`, they must not be
+ // mutated in `Record*` functions. They are logically `const` in that sense.
+ // These are guarded by init_mu, but that is not externalized to clients, who
+ // can only read them during `HashtablezSampler::Iterate` which will hold the
+ // lock.
+ static constexpr int kMaxStackDepth = 64;
+ absl::Time create_time;
+ int32_t depth;
+ void* stack[kMaxStackDepth];
size_t inline_element_size;
-};
-
-inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+};
+
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
- total_probe_length /= 16;
-#else
- total_probe_length /= 8;
-#endif
- info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
- info->num_erases.store(0, std::memory_order_relaxed);
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_rehashes.store(
1 + info->num_rehashes.load(std::memory_order_relaxed),
std::memory_order_relaxed);
-}
-
+}
+
inline void RecordReservationSlow(HashtablezInfo* info,
size_t target_capacity) {
info->max_reserve.store(
@@ -121,69 +121,69 @@ inline void RecordClearedReservationSlow(HashtablezInfo* info) {
info->max_reserve.store(0, std::memory_order_relaxed);
}
-inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
- size_t capacity) {
- info->size.store(size, std::memory_order_relaxed);
- info->capacity.store(capacity, std::memory_order_relaxed);
- if (size == 0) {
- // This is a clear, reset the total/num_erases too.
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
info->total_probe_length.store(0, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
- }
-}
-
-void RecordInsertSlow(HashtablezInfo* info, size_t hash,
- size_t distance_from_desired);
-
-inline void RecordEraseSlow(HashtablezInfo* info) {
- info->size.fetch_sub(1, std::memory_order_relaxed);
+ }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_erases.store(
1 + info->num_erases.load(std::memory_order_relaxed),
std::memory_order_relaxed);
-}
-
+}
+
HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
-void UnsampleSlow(HashtablezInfo* info);
-
+void UnsampleSlow(HashtablezInfo* info);
+
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-class HashtablezInfoHandle {
- public:
- explicit HashtablezInfoHandle() : info_(nullptr) {}
- explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
- ~HashtablezInfoHandle() {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- UnsampleSlow(info_);
- }
-
- HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
- HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
-
- HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
- : info_(absl::exchange(o.info_, nullptr)) {}
- HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
- if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
- UnsampleSlow(info_);
- }
- info_ = absl::exchange(o.info_, nullptr);
- return *this;
- }
-
- inline void RecordStorageChanged(size_t size, size_t capacity) {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordStorageChangedSlow(info_, size, capacity);
- }
-
- inline void RecordRehash(size_t total_probe_length) {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordRehashSlow(info_, total_probe_length);
- }
-
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() : info_(nullptr) {}
+ explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+ ~HashtablezInfoHandle() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ UnsampleSlow(info_);
+ }
+
+ HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+ HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+ HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+ : info_(absl::exchange(o.info_, nullptr)) {}
+ HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+ if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+ UnsampleSlow(info_);
+ }
+ info_ = absl::exchange(o.info_, nullptr);
+ return *this;
+ }
+
+ inline void RecordStorageChanged(size_t size, size_t capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordStorageChangedSlow(info_, size, capacity);
+ }
+
+ inline void RecordRehash(size_t total_probe_length) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordRehashSlow(info_, total_probe_length);
+ }
+
inline void RecordReservation(size_t target_capacity) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordReservationSlow(info_, target_capacity);
@@ -194,25 +194,25 @@ class HashtablezInfoHandle {
RecordClearedReservationSlow(info_);
}
- inline void RecordInsert(size_t hash, size_t distance_from_desired) {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordInsertSlow(info_, hash, distance_from_desired);
- }
-
- inline void RecordErase() {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordEraseSlow(info_);
- }
-
- friend inline void swap(HashtablezInfoHandle& lhs,
- HashtablezInfoHandle& rhs) {
- std::swap(lhs.info_, rhs.info_);
- }
-
- private:
- friend class HashtablezInfoHandlePeer;
- HashtablezInfo* info_;
-};
+ inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordInsertSlow(info_, hash, distance_from_desired);
+ }
+
+ inline void RecordErase() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordEraseSlow(info_);
+ }
+
+ friend inline void swap(HashtablezInfoHandle& lhs,
+ HashtablezInfoHandle& rhs) {
+ std::swap(lhs.info_, rhs.info_);
+ }
+
+ private:
+ friend class HashtablezInfoHandlePeer;
+ HashtablezInfo* info_;
+};
#else
// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
// be removed by the linker, in order to reduce the binary size.
@@ -220,7 +220,7 @@ class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() = default;
explicit HashtablezInfoHandle(std::nullptr_t) {}
-
+
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordReservation(size_t /*target_capacity*/) {}
@@ -234,48 +234,48 @@ class HashtablezInfoHandle {
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-
-// Returns an RAII sampling handle that manages registration and unregistation
-// with the global sampler.
+
+// Returns an RAII sampling handle that manages registration and unregistration
+// with the global sampler.
inline HashtablezInfoHandle Sample(
size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
- return HashtablezInfoHandle(nullptr);
- }
+ if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ return HashtablezInfoHandle(nullptr);
+ }
return HashtablezInfoHandle(
SampleSlow(&global_next_sample, inline_element_size));
-#else
- return HashtablezInfoHandle(nullptr);
-#endif // !ABSL_PER_THREAD_TLS
-}
-
+#else
+ return HashtablezInfoHandle(nullptr);
+#endif // !ABSL_PER_THREAD_TLS
+}
+
using HashtablezSampler =
::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
-
+
// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();
-
-// Enables or disables sampling for Swiss tables.
-void SetHashtablezEnabled(bool enabled);
-
-// Sets the rate at which Swiss tables will be sampled.
-void SetHashtablezSampleParameter(int32_t rate);
-
-// Sets a soft max for the number of samples that will be kept.
-void SetHashtablezMaxSamples(int32_t max);
-
-// Configuration override.
-// This allows process-wide sampling without depending on order of
-// initialization of static storage duration objects.
-// The definition of this constant is weak, which allows us to inject a
-// different value for it at link time.
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
-
-} // namespace container_internal
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
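
For orientation, here is a minimal sketch (not part of the diff) of how a container could use the handle returned by `Sample()`. The wrapping struct and its method names are hypothetical; only the `Sample()` and `HashtablezInfoHandle` calls come from the header above.

    // Sketch only: every Record* call below is a no-op unless this
    // particular table instance was selected for sampling.
    #include <cstddef>
    #include "absl/container/internal/hashtablez_sampler.h"

    struct SampledTable {  // hypothetical container
      SampledTable()
          : infoz_(absl::container_internal::Sample(
                /*inline_element_size=*/sizeof(int))) {}

      void OnInsert(size_t hash, size_t distance_from_desired) {
        infoz_.RecordInsert(hash, distance_from_desired);
      }
      void OnRehash(size_t total_probe_length) {
        infoz_.RecordRehash(total_probe_length);
      }

      absl::container_internal::HashtablezInfoHandle infoz_;
    };
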
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
index ed35a7eec3..5eafd7b227 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -1,31 +1,31 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/container/internal/hashtablez_sampler.h"
-
-#include "absl/base/attributes.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// See hashtablez_sampler.h for details.
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
AbslContainerInternalSampleEverything)() {
- return false;
-}
-
-} // namespace container_internal
+ return false;
+}
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
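
The weak definition above exists precisely so that a strong definition elsewhere wins at link time. A sketch of such an override, assuming `ABSL_INTERNAL_C_SYMBOL` is visible through the same Abseil headers this file includes (any other translation unit linked into the binary would do):

    // Sketch: a strong definition that replaces the weak one above and
    // turns on process-wide sampling, independent of static init order.
    #include "absl/container/internal/hashtablez_sampler.h"

    extern "C" bool ABSL_INTERNAL_C_SYMBOL(
        AbslContainerInternalSampleEverything)() {
      return true;
    }
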
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/have_sse.h b/contrib/restricted/abseil-cpp/absl/container/internal/have_sse.h
index e75e1a16d3..91cff3bca2 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/have_sse.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/have_sse.h
@@ -1,50 +1,50 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Shared config probing for SSE instructions used in Swiss tables.
-#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#if defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#if defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
-#else
+#else
#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
-#endif
-#endif
-
+#endif
+#endif
+
#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#ifdef __SSSE3__
+#ifdef __SSSE3__
#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
-#else
+#else
#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
-#endif
-#endif
-
+#endif
+#endif
+
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
!ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
+#error "Bad configuration!"
+#endif
+
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
+#include <emmintrin.h>
+#endif
+
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#include <tmmintrin.h>
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
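
Downstream code branches on the probed macros rather than on compiler flags directly. A small sketch with a hypothetical `MatchByteMask` helper; the intrinsics come from `<emmintrin.h>`, which the header above pulls in whenever SSE2 is detected:

    #include "absl/container/internal/have_sse.h"

    // Sketch: compare 16 control bytes at once when SSE2 is available,
    // with a portable bit-by-bit fallback otherwise.
    inline int MatchByteMask(const char* group, char needle) {
    #if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
      __m128i ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(group));
      return _mm_movemask_epi8(_mm_cmpeq_epi8(ctrl, _mm_set1_epi8(needle)));
    #else
      int mask = 0;
      for (int i = 0; i < 16; ++i) mask |= (group[i] == needle) << i;
      return mask;
    #endif
    }
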
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
index 1d7d6cda72..a4d013f5df 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -1,41 +1,41 @@
-// Copyright 2019 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
-#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
-
-#include <algorithm>
-#include <cstddef>
-#include <cstring>
-#include <iterator>
-#include <limits>
-#include <memory>
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
#include <new>
#include <type_traits>
-#include <utility>
-
+#include <utility>
+
#include "absl/base/attributes.h"
-#include "absl/base/macros.h"
-#include "absl/container/internal/compressed_tuple.h"
-#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
-#include "absl/types/span.h"
-
-namespace absl {
+#include "absl/base/macros.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace inlined_vector_internal {
-
+namespace inlined_vector_internal {
+
// GCC does not deal very well with the below code
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
@@ -72,23 +72,23 @@ using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
template <typename A>
using MoveIterator = typename std::move_iterator<Iterator<A>>;
-template <typename Iterator>
-using IsAtLeastForwardIterator = std::is_convertible<
- typename std::iterator_traits<Iterator>::iterator_category,
- std::forward_iterator_tag>;
-
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>;
+
template <typename A>
-using IsMemcpyOk =
+using IsMemcpyOk =
absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
absl::is_trivially_copy_constructible<ValueType<A>>,
absl::is_trivially_copy_assignable<ValueType<A>>,
absl::is_trivially_destructible<ValueType<A>>>;
-
+
template <typename T>
struct TypeIdentity {
using type = T;
};
-
+
// Used for function arguments in template functions to prevent ADL by forcing
// callers to explicitly specify the template parameter.
template <typename T>
@@ -97,14 +97,14 @@ using NoTypeDeduction = typename TypeIdentity<T>::type;
template <typename A>
void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
SizeType<A> destroy_size) {
- if (destroy_first != nullptr) {
+ if (destroy_first != nullptr) {
for (SizeType<A> i = destroy_size; i != 0;) {
- --i;
+ --i;
AllocatorTraits<A>::destroy(allocator, destroy_first + i);
- }
- }
-}
-
+ }
+ }
+}
+
template <typename A>
struct Allocation {
Pointer<A> data;
@@ -132,94 +132,94 @@ void ConstructElements(NoTypeDeduction<A>& allocator,
SizeType<A> construct_size) {
for (SizeType<A> i = 0; i < construct_size; ++i) {
ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
- ABSL_INTERNAL_CATCH_ANY {
+ ABSL_INTERNAL_CATCH_ANY {
DestroyElements<A>(allocator, construct_first, i);
- ABSL_INTERNAL_RETHROW;
- }
- }
-}
-
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+
template <typename A, typename ValueAdapter>
void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
SizeType<A> assign_size) {
for (SizeType<A> i = 0; i < assign_size; ++i) {
values.AssignNext(assign_first + i);
- }
-}
-
+ }
+}
+
template <typename A>
-struct StorageView {
+struct StorageView {
Pointer<A> data;
SizeType<A> size;
SizeType<A> capacity;
-};
-
+};
+
template <typename A, typename Iterator>
-class IteratorValueAdapter {
- public:
- explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
-
+class IteratorValueAdapter {
+ public:
+ explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
+
void ConstructNext(A& allocator, Pointer<A> construct_at) {
AllocatorTraits<A>::construct(allocator, construct_at, *it_);
- ++it_;
- }
-
+ ++it_;
+ }
+
void AssignNext(Pointer<A> assign_at) {
- *assign_at = *it_;
- ++it_;
- }
-
- private:
- Iterator it_;
-};
-
+ *assign_at = *it_;
+ ++it_;
+ }
+
+ private:
+ Iterator it_;
+};
+
template <typename A>
-class CopyValueAdapter {
- public:
+class CopyValueAdapter {
+ public:
explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
-
+
void ConstructNext(A& allocator, Pointer<A> construct_at) {
AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
- }
-
+ }
+
void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
-
- private:
+
+ private:
ConstPointer<A> ptr_;
-};
-
+};
+
template <typename A>
-class DefaultValueAdapter {
- public:
- explicit DefaultValueAdapter() {}
-
+class DefaultValueAdapter {
+ public:
+ explicit DefaultValueAdapter() {}
+
void ConstructNext(A& allocator, Pointer<A> construct_at) {
AllocatorTraits<A>::construct(allocator, construct_at);
- }
-
+ }
+
void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
-};
-
+};
+
template <typename A>
-class AllocationTransaction {
- public:
+class AllocationTransaction {
+ public:
explicit AllocationTransaction(A& allocator)
: allocator_data_(allocator, nullptr), capacity_(0) {}
-
- ~AllocationTransaction() {
- if (DidAllocate()) {
+
+ ~AllocationTransaction() {
+ if (DidAllocate()) {
MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
- }
- }
-
- AllocationTransaction(const AllocationTransaction&) = delete;
- void operator=(const AllocationTransaction&) = delete;
-
+ }
+ }
+
+ AllocationTransaction(const AllocationTransaction&) = delete;
+ void operator=(const AllocationTransaction&) = delete;
+
A& GetAllocator() { return allocator_data_.template get<0>(); }
Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
SizeType<A>& GetCapacity() { return capacity_; }
-
- bool DidAllocate() { return GetData() != nullptr; }
+
+ bool DidAllocate() { return GetData() != nullptr; }
Pointer<A> Allocate(SizeType<A> requested_capacity) {
Allocation<A> result =
@@ -227,8 +227,8 @@ class AllocationTransaction {
GetData() = result.data;
GetCapacity() = result.capacity;
return result.data;
- }
-
+ }
+
ABSL_MUST_USE_RESULT Allocation<A> Release() && {
Allocation<A> result = {GetData(), GetCapacity()};
Reset();
@@ -236,73 +236,73 @@ class AllocationTransaction {
}
private:
- void Reset() {
- GetData() = nullptr;
- GetCapacity() = 0;
- }
-
+ void Reset() {
+ GetData() = nullptr;
+ GetCapacity() = 0;
+ }
+
container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
SizeType<A> capacity_;
-};
-
+};
+
template <typename A>
-class ConstructionTransaction {
- public:
+class ConstructionTransaction {
+ public:
explicit ConstructionTransaction(A& allocator)
: allocator_data_(allocator, nullptr), size_(0) {}
-
- ~ConstructionTransaction() {
- if (DidConstruct()) {
+
+ ~ConstructionTransaction() {
+ if (DidConstruct()) {
DestroyElements<A>(GetAllocator(), GetData(), GetSize());
- }
- }
-
- ConstructionTransaction(const ConstructionTransaction&) = delete;
- void operator=(const ConstructionTransaction&) = delete;
-
+ }
+ }
+
+ ConstructionTransaction(const ConstructionTransaction&) = delete;
+ void operator=(const ConstructionTransaction&) = delete;
+
A& GetAllocator() { return allocator_data_.template get<0>(); }
Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
SizeType<A>& GetSize() { return size_; }
-
- bool DidConstruct() { return GetData() != nullptr; }
- template <typename ValueAdapter>
+
+ bool DidConstruct() { return GetData() != nullptr; }
+ template <typename ValueAdapter>
void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
ConstructElements<A>(GetAllocator(), data, values, size);
- GetData() = data;
- GetSize() = size;
- }
+ GetData() = data;
+ GetSize() = size;
+ }
void Commit() && {
- GetData() = nullptr;
- GetSize() = 0;
- }
-
- private:
+ GetData() = nullptr;
+ GetSize() = 0;
+ }
+
+ private:
container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
SizeType<A> size_;
-};
-
-template <typename T, size_t N, typename A>
-class Storage {
- public:
+};
+
+template <typename T, size_t N, typename A>
+class Storage {
+ public:
static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
- return current_capacity * 2;
- }
-
+ return current_capacity * 2;
+ }
+
static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
SizeType<A> requested_capacity) {
- return (std::max)(NextCapacity(current_capacity), requested_capacity);
- }
-
- // ---------------------------------------------------------------------------
- // Storage Constructors and Destructor
- // ---------------------------------------------------------------------------
-
+ return (std::max)(NextCapacity(current_capacity), requested_capacity);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
-
+
explicit Storage(const A& allocator)
: metadata_(allocator, /* size and is_allocated */ 0) {}
-
- ~Storage() {
+
+ ~Storage() {
if (GetSizeAndIsAllocated() == 0) {
// Empty and not allocated; nothing to do.
} else if (IsMemcpyOk<A>::value) {
@@ -311,162 +311,162 @@ class Storage {
} else {
DestroyContents();
}
- }
-
- // ---------------------------------------------------------------------------
- // Storage Member Accessors
- // ---------------------------------------------------------------------------
-
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Accessors
+ // ---------------------------------------------------------------------------
+
SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
-
+
const SizeType<A>& GetSizeAndIsAllocated() const {
- return metadata_.template get<1>();
- }
-
+ return metadata_.template get<1>();
+ }
+
SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
-
- bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
-
+
+ bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
+
Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
-
+
ConstPointer<A> GetAllocatedData() const {
- return data_.allocated.allocated_data;
- }
-
+ return data_.allocated.allocated_data;
+ }
+
Pointer<A> GetInlinedData() {
return reinterpret_cast<Pointer<A>>(
- std::addressof(data_.inlined.inlined_data[0]));
- }
-
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
ConstPointer<A> GetInlinedData() const {
return reinterpret_cast<ConstPointer<A>>(
- std::addressof(data_.inlined.inlined_data[0]));
- }
-
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
SizeType<A> GetAllocatedCapacity() const {
- return data_.allocated.allocated_capacity;
- }
-
+ return data_.allocated.allocated_capacity;
+ }
+
SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
-
+
StorageView<A> MakeStorageView() {
return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
GetAllocatedCapacity()}
: StorageView<A>{GetInlinedData(), GetSize(),
GetInlinedCapacity()};
- }
-
+ }
+
A& GetAllocator() { return metadata_.template get<0>(); }
-
+
const A& GetAllocator() const { return metadata_.template get<0>(); }
-
- // ---------------------------------------------------------------------------
- // Storage Member Mutators
- // ---------------------------------------------------------------------------
-
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Mutators
+ // ---------------------------------------------------------------------------
+
ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
- template <typename ValueAdapter>
+ template <typename ValueAdapter>
void Initialize(ValueAdapter values, SizeType<A> new_size);
-
- template <typename ValueAdapter>
+
+ template <typename ValueAdapter>
void Assign(ValueAdapter values, SizeType<A> new_size);
-
- template <typename ValueAdapter>
+
+ template <typename ValueAdapter>
void Resize(ValueAdapter values, SizeType<A> new_size);
-
- template <typename ValueAdapter>
+
+ template <typename ValueAdapter>
Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
SizeType<A> insert_count);
-
- template <typename... Args>
+
+ template <typename... Args>
Reference<A> EmplaceBack(Args&&... args);
-
+
Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
-
+
void Reserve(SizeType<A> requested_capacity);
-
- void ShrinkToFit();
-
- void Swap(Storage* other_storage_ptr);
-
- void SetIsAllocated() {
+
+ void ShrinkToFit();
+
+ void Swap(Storage* other_storage_ptr);
+
+ void SetIsAllocated() {
GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
- }
-
- void UnsetIsAllocated() {
+ }
+
+ void UnsetIsAllocated() {
GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
- }
-
+ }
+
void SetSize(SizeType<A> size) {
- GetSizeAndIsAllocated() =
+ GetSizeAndIsAllocated() =
(size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
- }
-
+ }
+
void SetAllocatedSize(SizeType<A> size) {
GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
- }
-
+ }
+
void SetInlinedSize(SizeType<A> size) {
GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
- }
-
+ }
+
void AddSize(SizeType<A> count) {
GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
- }
-
+ }
+
void SubtractSize(SizeType<A> count) {
- assert(count <= GetSize());
-
+ assert(count <= GetSize());
+
GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
- }
-
+ }
+
void SetAllocation(Allocation<A> allocation) {
data_.allocated.allocated_data = allocation.data;
data_.allocated.allocated_capacity = allocation.capacity;
- }
-
- void MemcpyFrom(const Storage& other_storage) {
+ }
+
+ void MemcpyFrom(const Storage& other_storage) {
assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
-
- GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
- data_ = other_storage.data_;
- }
-
- void DeallocateIfAllocated() {
- if (GetIsAllocated()) {
+
+ GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+ data_ = other_storage.data_;
+ }
+
+ void DeallocateIfAllocated() {
+ if (GetIsAllocated()) {
MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
GetAllocatedCapacity());
- }
- }
-
- private:
+ }
+ }
+
+ private:
ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
-
- struct Allocated {
+
+ struct Allocated {
Pointer<A> allocated_data;
SizeType<A> allocated_capacity;
- };
-
- struct Inlined {
+ };
+
+ struct Inlined {
alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
- };
-
- union Data {
- Allocated allocated;
- Inlined inlined;
- };
-
+ };
+
+ union Data {
+ Allocated allocated;
+ Inlined inlined;
+ };
+
template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
- Metadata metadata_;
- Data data_;
-};
-
-template <typename T, size_t N, typename A>
+ Metadata metadata_;
+ Data data_;
+};
+
+template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
DestroyElements<A>(GetAllocator(), data, GetSize());
@@ -504,81 +504,81 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
}
template <typename T, size_t N, typename A>
-template <typename ValueAdapter>
+template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
- -> void {
- // Only callable from constructors!
- assert(!GetIsAllocated());
- assert(GetSize() == 0);
-
+ -> void {
+ // Only callable from constructors!
+ assert(!GetIsAllocated());
+ assert(GetSize() == 0);
+
Pointer<A> construct_data;
- if (new_size > GetInlinedCapacity()) {
- // Because this is only called from the `InlinedVector` constructors, it's
- // safe to take on the allocation with size `0`. If `ConstructElements(...)`
- // throws, deallocation will be automatically handled by `~Storage()`.
+ if (new_size > GetInlinedCapacity()) {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+ // throws, deallocation will be automatically handled by `~Storage()`.
SizeType<A> requested_capacity =
ComputeCapacity(GetInlinedCapacity(), new_size);
Allocation<A> allocation =
MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
construct_data = allocation.data;
SetAllocation(allocation);
- SetIsAllocated();
- } else {
- construct_data = GetInlinedData();
- }
-
+ SetIsAllocated();
+ } else {
+ construct_data = GetInlinedData();
+ }
+
ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
-
- // Since the initial size was guaranteed to be `0` and the allocated bit is
- // already correct for either case, *adding* `new_size` gives us the correct
- // result faster than setting it directly.
- AddSize(new_size);
-}
-
-template <typename T, size_t N, typename A>
-template <typename ValueAdapter>
+
+ // Since the initial size was guaranteed to be `0` and the allocated bit is
+ // already correct for either case, *adding* `new_size` gives us the correct
+ // result faster than setting it directly.
+ AddSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
-> void {
StorageView<A> storage_view = MakeStorageView();
-
+
AllocationTransaction<A> allocation_tx(GetAllocator());
-
+
absl::Span<ValueType<A>> assign_loop;
absl::Span<ValueType<A>> construct_loop;
absl::Span<ValueType<A>> destroy_loop;
-
- if (new_size > storage_view.capacity) {
+
+ if (new_size > storage_view.capacity) {
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
- destroy_loop = {storage_view.data, storage_view.size};
- } else if (new_size > storage_view.size) {
- assign_loop = {storage_view.data, storage_view.size};
- construct_loop = {storage_view.data + storage_view.size,
- new_size - storage_view.size};
- } else {
- assign_loop = {storage_view.data, new_size};
- destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
- }
-
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ assign_loop = {storage_view.data, storage_view.size};
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ assign_loop = {storage_view.data, new_size};
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
AssignElements<A>(assign_loop.data(), values, assign_loop.size());
-
+
ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
construct_loop.size());
-
+
DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
-
- if (allocation_tx.DidAllocate()) {
- DeallocateIfAllocated();
+
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
- SetIsAllocated();
- }
-
- SetSize(new_size);
-}
-
-template <typename T, size_t N, typename A>
-template <typename ValueAdapter>
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
-> void {
StorageView<A> storage_view = MakeStorageView();
@@ -603,117 +603,117 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
-
+
ConstructionTransaction<A> construction_tx(alloc);
construction_tx.Construct(new_data + size, values, new_size - size);
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
(MoveIterator<A>(base)));
ConstructElements<A>(alloc, new_data, move_values, size);
-
+
DestroyElements<A>(alloc, base, size);
std::move(construction_tx).Commit();
- DeallocateIfAllocated();
+ DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
- SetIsAllocated();
- }
- SetSize(new_size);
-}
-
-template <typename T, size_t N, typename A>
-template <typename ValueAdapter>
+ SetIsAllocated();
+ }
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
SizeType<A> insert_count) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
-
+
SizeType<A> insert_index =
std::distance(ConstIterator<A>(storage_view.data), pos);
SizeType<A> insert_end_index = insert_index + insert_count;
SizeType<A> new_size = storage_view.size + insert_count;
-
- if (new_size > storage_view.capacity) {
+
+ if (new_size > storage_view.capacity) {
AllocationTransaction<A> allocation_tx(GetAllocator());
ConstructionTransaction<A> construction_tx(GetAllocator());
ConstructionTransaction<A> move_construction_tx(GetAllocator());
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
-
+
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
-
+
construction_tx.Construct(new_data + insert_index, values, insert_count);
-
+
move_construction_tx.Construct(new_data, move_values, insert_index);
-
+
ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
move_values, storage_view.size - insert_index);
-
+
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
-
+
std::move(construction_tx).Commit();
std::move(move_construction_tx).Commit();
- DeallocateIfAllocated();
+ DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
-
- SetAllocatedSize(new_size);
+
+ SetAllocatedSize(new_size);
return Iterator<A>(new_data + insert_index);
- } else {
+ } else {
SizeType<A> move_construction_destination_index =
- (std::max)(insert_end_index, storage_view.size);
-
+ (std::max)(insert_end_index, storage_view.size);
+
ConstructionTransaction<A> move_construction_tx(GetAllocator());
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
MoveIterator<A>(storage_view.data +
(move_construction_destination_index - insert_count)));
absl::Span<ValueType<A>> move_construction = {
- storage_view.data + move_construction_destination_index,
- new_size - move_construction_destination_index};
-
+ storage_view.data + move_construction_destination_index,
+ new_size - move_construction_destination_index};
+
Pointer<A> move_assignment_values = storage_view.data + insert_index;
absl::Span<ValueType<A>> move_assignment = {
- storage_view.data + insert_end_index,
- move_construction_destination_index - insert_end_index};
-
+ storage_view.data + insert_end_index,
+ move_construction_destination_index - insert_end_index};
+
absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
move_construction.size()};
-
+
absl::Span<ValueType<A>> insert_construction = {
- insert_assignment.data() + insert_assignment.size(),
- insert_count - insert_assignment.size()};
-
- move_construction_tx.Construct(move_construction.data(),
+ insert_assignment.data() + insert_assignment.size(),
+ insert_count - insert_assignment.size()};
+
+ move_construction_tx.Construct(move_construction.data(),
move_construction_values,
- move_construction.size());
-
+ move_construction.size());
+
for (Pointer<A>
destination = move_assignment.data() + move_assignment.size(),
last_destination = move_assignment.data(),
source = move_assignment_values + move_assignment.size();
- ;) {
- --destination;
- --source;
- if (destination < last_destination) break;
- *destination = std::move(*source);
- }
-
+ ;) {
+ --destination;
+ --source;
+ if (destination < last_destination) break;
+ *destination = std::move(*source);
+ }
+
AssignElements<A>(insert_assignment.data(), values,
insert_assignment.size());
-
+
ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
insert_construction.size());
-
+
std::move(move_construction_tx).Commit();
-
- AddSize(insert_count);
+
+ AddSize(insert_count);
return Iterator<A>(storage_view.data + insert_index);
- }
-}
-
-template <typename T, size_t N, typename A>
-template <typename... Args>
+ }
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
const SizeType<A> n = storage_view.size;
@@ -728,7 +728,7 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
// TODO(b/173712035): Annotate with musttail attribute to prevent regression.
return EmplaceBackSlow(std::forward<Args>(args)...);
}
-
+
template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
@@ -739,7 +739,7 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
Pointer<A> last_ptr = construct_data + storage_view.size;
-
+
// Construct new element.
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
@@ -747,186 +747,186 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
ABSL_INTERNAL_TRY {
ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
storage_view.size);
- }
+ }
ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
-
+
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
- AddSize(1);
- return *last_ptr;
-}
-
-template <typename T, size_t N, typename A>
+ AddSize(1);
+ return *last_ptr;
+}
+
+template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
-> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
-
+
SizeType<A> erase_size = std::distance(from, to);
SizeType<A> erase_index =
std::distance(ConstIterator<A>(storage_view.data), from);
SizeType<A> erase_end_index = erase_index + erase_size;
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data + erase_end_index));
-
+
AssignElements<A>(storage_view.data + erase_index, move_values,
storage_view.size - erase_end_index);
-
+
DestroyElements<A>(GetAllocator(),
storage_view.data + (storage_view.size - erase_size),
erase_size);
-
- SubtractSize(erase_size);
+
+ SubtractSize(erase_size);
return Iterator<A>(storage_view.data + erase_index);
-}
-
-template <typename T, size_t N, typename A>
+}
+
+template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
StorageView<A> storage_view = MakeStorageView();
-
- if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
-
+
+ if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
+
AllocationTransaction<A> allocation_tx(GetAllocator());
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
-
+
SizeType<A> new_requested_capacity =
- ComputeCapacity(storage_view.capacity, requested_capacity);
+ ComputeCapacity(storage_view.capacity, requested_capacity);
Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
-
+
ConstructElements<A>(GetAllocator(), new_data, move_values,
storage_view.size);
-
+
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
-
- DeallocateIfAllocated();
+
+ DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
- SetIsAllocated();
-}
-
-template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::ShrinkToFit() -> void {
- // May only be called on allocated instances!
- assert(GetIsAllocated());
-
+ SetIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+ // May only be called on allocated instances!
+ assert(GetIsAllocated());
+
StorageView<A> storage_view{GetAllocatedData(), GetSize(),
GetAllocatedCapacity()};
-
- if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
-
+
+ if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
AllocationTransaction<A> allocation_tx(GetAllocator());
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
-
+
Pointer<A> construct_data;
- if (storage_view.size > GetInlinedCapacity()) {
+ if (storage_view.size > GetInlinedCapacity()) {
SizeType<A> requested_capacity = storage_view.size;
construct_data = allocation_tx.Allocate(requested_capacity);
if (allocation_tx.GetCapacity() >= storage_view.capacity) {
// Already using the smallest available heap allocation.
return;
}
- } else {
- construct_data = GetInlinedData();
- }
-
- ABSL_INTERNAL_TRY {
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ ABSL_INTERNAL_TRY {
ConstructElements<A>(GetAllocator(), construct_data, move_values,
storage_view.size);
- }
- ABSL_INTERNAL_CATCH_ANY {
+ }
+ ABSL_INTERNAL_CATCH_ANY {
SetAllocation({storage_view.data, storage_view.capacity});
- ABSL_INTERNAL_RETHROW;
- }
-
+ ABSL_INTERNAL_RETHROW;
+ }
+
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
-
+
MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
storage_view.capacity);
-
- if (allocation_tx.DidAllocate()) {
+
+ if (allocation_tx.DidAllocate()) {
SetAllocation(std::move(allocation_tx).Release());
- } else {
- UnsetIsAllocated();
- }
-}
-
-template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
- using std::swap;
- assert(this != other_storage_ptr);
-
- if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
- swap(data_.allocated, other_storage_ptr->data_.allocated);
- } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
- Storage* small_ptr = this;
- Storage* large_ptr = other_storage_ptr;
- if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
-
+ } else {
+ UnsetIsAllocated();
+ }
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+ using std::swap;
+ assert(this != other_storage_ptr);
+
+ if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+ swap(data_.allocated, other_storage_ptr->data_.allocated);
+ } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+ Storage* small_ptr = this;
+ Storage* large_ptr = other_storage_ptr;
+ if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
- swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
- }
-
+ swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+ }
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
-
+
ConstructElements<A>(large_ptr->GetAllocator(),
small_ptr->GetInlinedData() + small_ptr->GetSize(),
move_values,
large_ptr->GetSize() - small_ptr->GetSize());
-
+
DestroyElements<A>(large_ptr->GetAllocator(),
large_ptr->GetInlinedData() + small_ptr->GetSize(),
large_ptr->GetSize() - small_ptr->GetSize());
- } else {
- Storage* allocated_ptr = this;
- Storage* inlined_ptr = other_storage_ptr;
- if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
-
+ } else {
+ Storage* allocated_ptr = this;
+ Storage* inlined_ptr = other_storage_ptr;
+ if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
StorageView<A> allocated_storage_view{
allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
allocated_ptr->GetAllocatedCapacity()};
-
+
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(inlined_ptr->GetInlinedData()));
-
- ABSL_INTERNAL_TRY {
+
+ ABSL_INTERNAL_TRY {
ConstructElements<A>(inlined_ptr->GetAllocator(),
allocated_ptr->GetInlinedData(), move_values,
inlined_ptr->GetSize());
- }
- ABSL_INTERNAL_CATCH_ANY {
+ }
+ ABSL_INTERNAL_CATCH_ANY {
allocated_ptr->SetAllocation(
{allocated_storage_view.data, allocated_storage_view.capacity});
- ABSL_INTERNAL_RETHROW;
- }
-
+ ABSL_INTERNAL_RETHROW;
+ }
+
DestroyElements<A>(inlined_ptr->GetAllocator(),
inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
-
+
inlined_ptr->SetAllocation(
{allocated_storage_view.data, allocated_storage_view.capacity});
- }
-
- swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+ }
+
+ swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
swap(GetAllocator(), other_storage_ptr->GetAllocator());
-}
-
+}
+
// End ignore "array-bounds" and "maybe-uninitialized"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
-} // namespace inlined_vector_internal
+} // namespace inlined_vector_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
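
`Storage` above packs the element count and the is-allocated flag into a single integer: the flag lives in bit 0 and the size in the remaining high bits, which is why `GetSize()` shifts right and `SetSize()` shifts left. A standalone sketch of that encoding (names are illustrative, not from the diff):

    #include <cassert>
    #include <cstddef>

    // Sketch of the size/is_allocated tagging behind
    // GetSizeAndIsAllocated(): bit 0 = allocated flag, bits [1..) = size.
    struct SizeAndFlag {
      size_t raw = 0;
      size_t size() const { return raw >> 1; }
      bool is_allocated() const { return raw & 1; }
      void set_size(size_t n) { raw = (n << 1) | (raw & 1); }
      void set_allocated(bool a) { raw = (raw & ~size_t{1}) | size_t{a}; }
    };

    // Usage mirroring SetAllocatedSize(3) from the diff above:
    // SizeAndFlag s; s.set_size(3); s.set_allocated(true);
    // assert(s.size() == 3 && s.is_allocated());
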
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
index a59a243059..780465614e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
@@ -1,743 +1,743 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// MOTIVATION AND TUTORIAL
-//
-// If you want to put in a single heap allocation N doubles followed by M ints,
-// it's easy if N and M are known at compile time.
-//
-// struct S {
-// double a[N];
-// int b[M];
-// };
-//
-// S* p = new S;
-//
-// But what if N and M are known only at run time? Class template Layout to the
-// rescue! It's a portable generalization of the technique known as struct hack.
-//
-// // This object will tell us everything we need to know about the memory
-// // layout of double[N] followed by int[M]. It's structurally identical to
-// // size_t[2] that stores N and M. It's very cheap to create.
-// const Layout<double, int> layout(N, M);
-//
-// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
-// // memory is needed. We are free to use any allocation function we want as
-// // long as it returns aligned memory.
-// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
-//
-// // Obtain the pointer to the array of doubles.
-// // Equivalent to `reinterpret_cast<double*>(p.get())`.
-// //
-// // We could have written layout.Pointer<0>(p) instead. If all the types are
-// // unique you can use either form, but if some types are repeated you must
-// // use the index form.
-// double* a = layout.Pointer<double>(p.get());
-//
-// // Obtain the pointer to the array of ints.
-// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
-// int* b = layout.Pointer<int>(p);
-//
-// If we are unable to specify sizes of all fields, we can pass as many sizes as
-// we can to `Partial()`. In return, it'll allow us to access the fields whose
-// locations and sizes can be computed from the provided information.
-// `Partial()` comes in handy when the array sizes are embedded into the
-// allocation.
-//
-// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
-// using L = Layout<size_t, size_t, double, int>;
-//
-// unsigned char* Allocate(size_t n, size_t m) {
-// const L layout(1, 1, n, m);
-// unsigned char* p = new unsigned char[layout.AllocSize()];
-// *layout.Pointer<0>(p) = n;
-// *layout.Pointer<1>(p) = m;
-// return p;
-// }
-//
-// void Use(unsigned char* p) {
-// // First, extract N and M.
-// // Specify that the first array has only one element. Using `prefix` we
-// // can access the first two arrays but not more.
-// constexpr auto prefix = L::Partial(1);
-// size_t n = *prefix.Pointer<0>(p);
-// size_t m = *prefix.Pointer<1>(p);
-//
-// // Now we can get pointers to the payload.
-// const L layout(1, 1, n, m);
-// double* a = layout.Pointer<double>(p);
-// int* b = layout.Pointer<int>(p);
-// }
-//
-// The layout we used above combines fixed-size with dynamically-sized fields.
-// This is quite common. Layout is optimized for this use case and generates
-// optimal code. All computations that can be performed at compile time are
-// indeed performed at compile time.
-//
-// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
-// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
-// padding in between arrays.
-//
-// You can manually override the alignment of an array by wrapping the type in
-// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
-// and behavior as `Layout<..., T, ...>` except that the first element of the
-// array of `T` is aligned to `N` (the rest of the elements follow without
-// padding). `N` cannot be less than `alignof(T)`.
-//
-// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
-// memory layouts. Check out the reference or code below to discover more.
-//
-// EXAMPLE
-//
-// // Immutable move-only string with sizeof equal to sizeof(void*). The
-// // string size and the characters are kept in the same heap allocation.
-// class CompactString {
-// public:
-// CompactString(const char* s = "") {
-// const size_t size = strlen(s);
-// // size_t[1] followed by char[size + 1].
-// const L layout(1, size + 1);
-// p_.reset(new unsigned char[layout.AllocSize()]);
-// // If running under ASAN, mark the padding bytes, if any, to catch
-// // memory errors.
-// layout.PoisonPadding(p_.get());
-// // Store the size in the allocation.
-// *layout.Pointer<size_t>(p_.get()) = size;
-// // Store the characters in the allocation.
-// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
-// }
-//
-// size_t size() const {
-// // Equivalent to reinterpret_cast<size_t&>(*p).
-// return *L::Partial().Pointer<size_t>(p_.get());
-// }
-//
-// const char* c_str() const {
-// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
-// // The argument in Partial(1) specifies that we have size_t[1] in front
-// // of the characters.
-// return L::Partial(1).Pointer<char>(p_.get());
-// }
-//
-// private:
-// // Our heap allocation contains a size_t followed by an array of chars.
-// using L = Layout<size_t, char>;
-// std::unique_ptr<unsigned char[]> p_;
-// };
-//
-// int main() {
-// CompactString s = "hello";
-// assert(s.size() == 5);
-// assert(strcmp(s.c_str(), "hello") == 0);
-// }
-//
-// DOCUMENTATION
-//
-// The interface exported by this file consists of:
-// - class `Layout<>` and its public members.
-// - The public members of class `internal_layout::LayoutImpl<>`. That class
-// isn't intended to be used directly, and its name and template parameter
-// list are internal implementation details, but the class itself provides
-// most of the functionality in this file. See comments on its members for
-// detailed documentation.
-//
-// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
-// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
-// creates a `Layout` object, which exposes the same functionality by inheriting
-// from `LayoutImpl<>`.
-
-#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
-#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
-
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <ostream>
-#include <string>
-#include <tuple>
-#include <type_traits>
-#include <typeinfo>
-#include <utility>
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+// struct S {
+// double a[N];
+// int b[M];
+// };
+//
+// S* p = new S;
+//
+// But what if N and M are known only at run time? Class template Layout to the
+// rescue! It's a portable generalization of the technique known as struct hack.
+//
+// // This object will tell us everything we need to know about the memory
+// // layout of double[N] followed by int[M]. It's structurally identical to
+// // size_t[2] that stores N and M. It's very cheap to create.
+// const Layout<double, int> layout(N, M);
+//
+// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+// // memory is needed. We are free to use any allocation function we want as
+// // long as it returns aligned memory.
+// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+// // Obtain the pointer to the array of doubles.
+// // Equivalent to `reinterpret_cast<double*>(p.get())`.
+// //
+// // We could have written layout.Pointer<0>(p) instead. If all the types are
+// // unique you can use either form, but if some types are repeated you must
+// // use the index form.
+// double* a = layout.Pointer<double>(p.get());
+//
+// // Obtain the pointer to the array of ints.
+// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+// int* b = layout.Pointer<int>(p);
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+// using L = Layout<size_t, size_t, double, int>;
+//
+// unsigned char* Allocate(size_t n, size_t m) {
+// const L layout(1, 1, n, m);
+// unsigned char* p = new unsigned char[layout.AllocSize()];
+// *layout.Pointer<0>(p) = n;
+// *layout.Pointer<1>(p) = m;
+// return p;
+// }
+//
+// void Use(unsigned char* p) {
+// // First, extract N and M.
+// // Specify that the first array has only one element. Using `prefix` we
+// // can access the first two arrays but not more.
+// constexpr auto prefix = L::Partial(1);
+// size_t n = *prefix.Pointer<0>(p);
+// size_t m = *prefix.Pointer<1>(p);
+//
+// // Now we can get pointers to the payload.
+// const L layout(1, 1, n, m);
+// double* a = layout.Pointer<double>(p);
+// int* b = layout.Pointer<int>(p);
+// }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+// EXAMPLE
+//
+// // Immutable move-only string with sizeof equal to sizeof(void*). The
+// // string size and the characters are kept in the same heap allocation.
+// class CompactString {
+// public:
+// CompactString(const char* s = "") {
+// const size_t size = strlen(s);
+// // size_t[1] followed by char[size + 1].
+// const L layout(1, size + 1);
+// p_.reset(new unsigned char[layout.AllocSize()]);
+// // If running under ASAN, mark the padding bytes, if any, to catch
+// // memory errors.
+// layout.PoisonPadding(p_.get());
+// // Store the size in the allocation.
+// *layout.Pointer<size_t>(p_.get()) = size;
+// // Store the characters in the allocation.
+// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+// }
+//
+// size_t size() const {
+// // Equivalent to reinterpret_cast<size_t&>(*p).
+// return *L::Partial().Pointer<size_t>(p_.get());
+// }
+//
+// const char* c_str() const {
+// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
+// // The argument in Partial(1) specifies that we have size_t[1] in front
+// // of the characters.
+// return L::Partial(1).Pointer<char>(p_.get());
+// }
+//
+// private:
+// // Our heap allocation contains a size_t followed by an array of chars.
+// using L = Layout<size_t, char>;
+// std::unique_ptr<unsigned char[]> p_;
+// };
+//
+// int main() {
+// CompactString s = "hello";
+// assert(s.size() == 5);
+// assert(strcmp(s.c_str(), "hello") == 0);
+// }
+//
+// DOCUMENTATION
+//
+// The interface exported by this file consists of:
+// - class `Layout<>` and its public members.
+// - The public members of class `internal_layout::LayoutImpl<>`. That class
+// isn't intended to be used directly, and its name and template parameter
+// list are internal implementation details, but the class itself provides
+// most of the functionality in this file. See comments on its members for
+// detailed documentation.
+//
+// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
+// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
+// creates a `Layout` object, which exposes the same functionality by inheriting
+// from `LayoutImpl<>`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ostream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
#include "absl/base/config.h"
-#include "absl/meta/type_traits.h"
-#include "absl/strings/str_cat.h"
-#include "absl/types/span.h"
-#include "absl/utility/utility.h"
-
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/span.h"
+#include "absl/utility/utility.h"
+
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
-#if defined(__GXX_RTTI)
-#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
-#endif
-
-#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
-#include <cxxabi.h>
-#endif
-
-namespace absl {
+#if defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#endif
+
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+#endif
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// A type wrapper that instructs `Layout` to use the specific alignment for the
-// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
-// and behavior as `Layout<..., T, ...>` except that the first element of the
-// array of `T` is aligned to `N` (the rest of the elements follow without
-// padding).
-//
-// Requires: `N >= alignof(T)` and `N` is a power of 2.
-template <class T, size_t N>
-struct Aligned;
-
-namespace internal_layout {
-
-template <class T>
-struct NotAligned {};
-
-template <class T, size_t N>
-struct NotAligned<const Aligned<T, N>> {
- static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
-};
-
-template <size_t>
-using IntToSize = size_t;
-
-template <class>
-using TypeToSize = size_t;
-
-template <class T>
-struct Type : NotAligned<T> {
- using type = T;
-};
-
-template <class T, size_t N>
-struct Type<Aligned<T, N>> {
- using type = T;
-};
-
-template <class T>
-struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
-
-template <class T, size_t N>
-struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
-
-// Note: workaround for https://gcc.gnu.org/PR88115
-template <class T>
-struct AlignOf : NotAligned<T> {
- static constexpr size_t value = alignof(T);
-};
-
-template <class T, size_t N>
-struct AlignOf<Aligned<T, N>> {
- static_assert(N % alignof(T) == 0,
- "Custom alignment can't be lower than the type's alignment");
- static constexpr size_t value = N;
-};
-
-// Does `Ts...` contain `T`?
-template <class T, class... Ts>
-using Contains = absl::disjunction<std::is_same<T, Ts>...>;
-
-template <class From, class To>
-using CopyConst =
- typename std::conditional<std::is_const<From>::value, const To, To>::type;
-
-// Note: We're not qualifying this with absl:: because it doesn't compile under
-// MSVC.
-template <class T>
-using SliceType = Span<T>;
-
-// This namespace contains no types. It prevents functions defined in it from
-// being found by ADL.
-namespace adl_barrier {
-
-template <class Needle, class... Ts>
-constexpr size_t Find(Needle, Needle, Ts...) {
- static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
- return 0;
-}
-
-template <class Needle, class T, class... Ts>
-constexpr size_t Find(Needle, T, Ts...) {
- return adl_barrier::Find(Needle(), Ts()...) + 1;
-}
-
-constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
-
-// Returns `q * m` for the smallest `q` such that `q * m >= n`.
-// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
-constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
-
-constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
-
-constexpr size_t Max(size_t a) { return a; }
-
-template <class... Ts>
-constexpr size_t Max(size_t a, size_t b, Ts... rest) {
- return adl_barrier::Max(b < a ? a : b, rest...);
-}
-
-template <class T>
-std::string TypeName() {
- std::string out;
- int status = 0;
- char* demangled = nullptr;
-#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
- demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
-#endif
- if (status == 0 && demangled != nullptr) { // Demangling succeeded.
- absl::StrAppend(&out, "<", demangled, ">");
- free(demangled);
- } else {
-#if defined(__GXX_RTTI) || defined(_CPPRTTI)
- absl::StrAppend(&out, "<", typeid(T).name(), ">");
-#endif
- }
- return out;
-}
-
-} // namespace adl_barrier
-
-template <bool C>
-using EnableIf = typename std::enable_if<C, int>::type;
-
-// Can `T` be a template argument of `Layout`?
-template <class T>
-using IsLegalElementType = std::integral_constant<
- bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
- !std::is_reference<typename Type<T>::type>::value &&
- !std::is_volatile<typename Type<T>::type>::value &&
- adl_barrier::IsPow2(AlignOf<T>::value)>;
-
-template <class Elements, class SizeSeq, class OffsetSeq>
-class LayoutImpl;
-
-// Public base class of `Layout` and the result type of `Layout::Partial()`.
-//
-// `Elements...` contains all template arguments of `Layout` that created this
-// instance.
-//
-// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
-// passed to `Layout::Partial()` or `Layout::Layout()`.
-//
-// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
-// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
-// can compute offsets).
-template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
-class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
- absl::index_sequence<OffsetSeq...>> {
- private:
- static_assert(sizeof...(Elements) > 0, "At least one field is required");
- static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
- "Invalid element type (see IsLegalElementType)");
-
- enum {
- NumTypes = sizeof...(Elements),
- NumSizes = sizeof...(SizeSeq),
- NumOffsets = sizeof...(OffsetSeq),
- };
-
- // These are guaranteed by `Layout`.
- static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
- "Internal error");
- static_assert(NumTypes > 0, "Internal error");
-
- // Returns the index of `T` in `Elements...`. Results in a compilation error
- // if `Elements...` doesn't contain exactly one instance of `T`.
- template <class T>
- static constexpr size_t ElementIndex() {
- static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
- "Type not found");
- return adl_barrier::Find(Type<T>(),
- Type<typename Type<Elements>::type>()...);
- }
-
- template <size_t N>
- using ElementAlignment =
- AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
-
- public:
- // Element types of all arrays packed in a tuple.
- using ElementTypes = std::tuple<typename Type<Elements>::type...>;
-
- // Element type of the Nth array.
- template <size_t N>
- using ElementType = typename std::tuple_element<N, ElementTypes>::type;
-
- constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
- : size_{sizes...} {}
-
- // Alignment of the layout, equal to the strictest alignment of all elements.
- // All pointers passed to the methods of layout must be aligned to this value.
- static constexpr size_t Alignment() {
- return adl_barrier::Max(AlignOf<Elements>::value...);
- }
-
- // Offset in bytes of the Nth array.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
-  //   assert(x.Offset<0>() == 0);   // The ints start from 0.
-  //   assert(x.Offset<1>() == 16);  // The doubles start from 16.
- //
- // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
- template <size_t N, EnableIf<N == 0> = 0>
- constexpr size_t Offset() const {
- return 0;
- }
-
- template <size_t N, EnableIf<N != 0> = 0>
- constexpr size_t Offset() const {
- static_assert(N < NumOffsets, "Index out of bounds");
- return adl_barrier::Align(
+namespace container_internal {
+
+// A type wrapper that instructs `Layout` to use the specific alignment for the
+// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding).
+//
+// Requires: `N >= alignof(T)` and `N` is a power of 2.
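+//
+// For example (a sketch; 64 is just an arbitrary illustrative alignment):
+//
+//   // int[3], padding up to offset 64, then double[4] starting on a
+//   // 64-byte boundary.
+//   Layout<int, Aligned<double, 64>> x(3, 4);
+//   assert(x.Offset<1>() == 64);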
+template <class T, size_t N>
+struct Aligned;
+
+namespace internal_layout {
+
+template <class T>
+struct NotAligned {};
+
+template <class T, size_t N>
+struct NotAligned<const Aligned<T, N>> {
+ static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
+};
+
+template <size_t>
+using IntToSize = size_t;
+
+template <class>
+using TypeToSize = size_t;
+
+template <class T>
+struct Type : NotAligned<T> {
+ using type = T;
+};
+
+template <class T, size_t N>
+struct Type<Aligned<T, N>> {
+ using type = T;
+};
+
+template <class T>
+struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
+
+template <class T, size_t N>
+struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
+
+// Note: workaround for https://gcc.gnu.org/PR88115
+template <class T>
+struct AlignOf : NotAligned<T> {
+ static constexpr size_t value = alignof(T);
+};
+
+template <class T, size_t N>
+struct AlignOf<Aligned<T, N>> {
+ static_assert(N % alignof(T) == 0,
+ "Custom alignment can't be lower than the type's alignment");
+ static constexpr size_t value = N;
+};
+
+// Does `Ts...` contain `T`?
+template <class T, class... Ts>
+using Contains = absl::disjunction<std::is_same<T, Ts>...>;
+
+template <class From, class To>
+using CopyConst =
+ typename std::conditional<std::is_const<From>::value, const To, To>::type;
+
+// Note: We're not qualifying this with absl:: because it doesn't compile under
+// MSVC.
+template <class T>
+using SliceType = Span<T>;
+
+// This namespace contains no types. It prevents functions defined in it from
+// being found by ADL.
+namespace adl_barrier {
+
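+// Returns the zero-based index of `Needle` in the remaining arguments. For
+// example (an illustrative note; a duplicated needle trips the static_assert
+// below):
+//
+//   Find(Type<int>(), Type<char>(), Type<int>(), Type<double>())  // == 1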
+template <class Needle, class... Ts>
+constexpr size_t Find(Needle, Needle, Ts...) {
+ static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
+ return 0;
+}
+
+template <class Needle, class T, class... Ts>
+constexpr size_t Find(Needle, T, Ts...) {
+ return adl_barrier::Find(Needle(), Ts()...) + 1;
+}
+
+constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
+
+// Returns `q * m` for the smallest `q` such that `q * m >= n`.
+// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
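+// For example, Align(13, 8) == 16 and Align(16, 8) == 16.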
+constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
+constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
+
+constexpr size_t Max(size_t a) { return a; }
+
+template <class... Ts>
+constexpr size_t Max(size_t a, size_t b, Ts... rest) {
+ return adl_barrier::Max(b < a ? a : b, rest...);
+}
+
+template <class T>
+std::string TypeName() {
+ std::string out;
+ int status = 0;
+ char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+ demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+#endif
+ if (status == 0 && demangled != nullptr) { // Demangling succeeded.
+ absl::StrAppend(&out, "<", demangled, ">");
+ free(demangled);
+ } else {
+#if defined(__GXX_RTTI) || defined(_CPPRTTI)
+ absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#endif
+ }
+ return out;
+}
+
+} // namespace adl_barrier
+
+template <bool C>
+using EnableIf = typename std::enable_if<C, int>::type;
+
+// Can `T` be a template argument of `Layout`?
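+// For example, `int&` and `volatile int` are not legal element types.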
+template <class T>
+using IsLegalElementType = std::integral_constant<
+ bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
+ !std::is_reference<typename Type<T>::type>::value &&
+ !std::is_volatile<typename Type<T>::type>::value &&
+ adl_barrier::IsPow2(AlignOf<T>::value)>;
+
+template <class Elements, class SizeSeq, class OffsetSeq>
+class LayoutImpl;
+
+// Public base class of `Layout` and the result type of `Layout::Partial()`.
+//
+// `Elements...` contains all template arguments of `Layout` that created this
+// instance.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
+// passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
+// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
+// can compute offsets).
+template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
+ absl::index_sequence<OffsetSeq...>> {
+ private:
+ static_assert(sizeof...(Elements) > 0, "At least one field is required");
+ static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ enum {
+ NumTypes = sizeof...(Elements),
+ NumSizes = sizeof...(SizeSeq),
+ NumOffsets = sizeof...(OffsetSeq),
+ };
+
+ // These are guaranteed by `Layout`.
+ static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
+ "Internal error");
+ static_assert(NumTypes > 0, "Internal error");
+
+ // Returns the index of `T` in `Elements...`. Results in a compilation error
+ // if `Elements...` doesn't contain exactly one instance of `T`.
+ template <class T>
+ static constexpr size_t ElementIndex() {
+ static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
+ "Type not found");
+ return adl_barrier::Find(Type<T>(),
+ Type<typename Type<Elements>::type>()...);
+ }
+
+ template <size_t N>
+ using ElementAlignment =
+ AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
+
+ public:
+ // Element types of all arrays packed in a tuple.
+ using ElementTypes = std::tuple<typename Type<Elements>::type...>;
+
+ // Element type of the Nth array.
+ template <size_t N>
+ using ElementType = typename std::tuple_element<N, ElementTypes>::type;
+
+ constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+ : size_{sizes...} {}
+
+ // Alignment of the layout, equal to the strictest alignment of all elements.
+ // All pointers passed to the methods of layout must be aligned to this value.
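+  //
+  // For example (illustrative; the exact values are platform-dependent):
+  //
+  //   // On typical platforms alignof(int) == 4 and alignof(double) == 8, so:
+  //   static_assert(Layout<int, double>::Alignment() == 8, "");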
+ static constexpr size_t Alignment() {
+ return adl_barrier::Max(AlignOf<Elements>::value...);
+ }
+
+ // Offset in bytes of the Nth array.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+  //   assert(x.Offset<0>() == 0);   // The ints start from 0.
+  //   assert(x.Offset<1>() == 16);  // The doubles start from 16.
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ template <size_t N, EnableIf<N == 0> = 0>
+ constexpr size_t Offset() const {
+ return 0;
+ }
+
+ template <size_t N, EnableIf<N != 0> = 0>
+ constexpr size_t Offset() const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ return adl_barrier::Align(
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
- ElementAlignment<N>::value);
- }
-
- // Offset in bytes of the array with the specified element type. There must
- // be exactly one such array and its zero-based index must be at most
- // `NumSizes`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
-  //   assert(x.Offset<int>() == 0);      // The ints start from 0.
-  //   assert(x.Offset<double>() == 16);  // The doubles start from 16.
- template <class T>
- constexpr size_t Offset() const {
- return Offset<ElementIndex<T>()>();
- }
-
- // Offsets in bytes of all arrays for which the offsets are known.
- constexpr std::array<size_t, NumOffsets> Offsets() const {
- return {{Offset<OffsetSeq>()...}};
- }
-
- // The number of elements in the Nth array. This is the Nth argument of
- // `Layout::Partial()` or `Layout::Layout()` (zero-based).
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // assert(x.Size<0>() == 3);
- // assert(x.Size<1>() == 4);
- //
- // Requires: `N < NumSizes`.
- template <size_t N>
- constexpr size_t Size() const {
- static_assert(N < NumSizes, "Index out of bounds");
- return size_[N];
- }
-
- // The number of elements in the array with the specified element type.
- // There must be exactly one such array and its zero-based index must be
- // at most `NumSizes`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // assert(x.Size<int>() == 3);
- // assert(x.Size<double>() == 4);
- template <class T>
- constexpr size_t Size() const {
- return Size<ElementIndex<T>()>();
- }
-
-  // The number of elements in all arrays for which the sizes are known.
- constexpr std::array<size_t, NumSizes> Sizes() const {
- return {{Size<SizeSeq>()...}};
- }
-
- // Pointer to the beginning of the Nth array.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()];
- // int* ints = x.Pointer<0>(p);
- // double* doubles = x.Pointer<1>(p);
- //
- // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
- // Requires: `p` is aligned to `Alignment()`.
- template <size_t N, class Char>
- CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
- using C = typename std::remove_const<Char>::type;
- static_assert(
- std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
- std::is_same<C, signed char>(),
- "The argument must be a pointer to [const] [signed|unsigned] char");
- constexpr size_t alignment = Alignment();
- (void)alignment;
- assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
- return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
- }
-
- // Pointer to the beginning of the array with the specified element type.
- // There must be exactly one such array and its zero-based index must be at
- // most `NumSizes`.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()];
- // int* ints = x.Pointer<int>(p);
- // double* doubles = x.Pointer<double>(p);
- //
- // Requires: `p` is aligned to `Alignment()`.
- template <class T, class Char>
- CopyConst<Char, T>* Pointer(Char* p) const {
- return Pointer<ElementIndex<T>()>(p);
- }
-
- // Pointers to all arrays for which pointers are known.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()];
- //
- // int* ints;
- // double* doubles;
- // std::tie(ints, doubles) = x.Pointers(p);
- //
- // Requires: `p` is aligned to `Alignment()`.
- //
- // Note: We're not using ElementType alias here because it does not compile
- // under MSVC.
- template <class Char>
- std::tuple<CopyConst<
- Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
- Pointers(Char* p) const {
- return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
- Pointer<OffsetSeq>(p)...);
- }
-
- // The Nth array.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()];
- // Span<int> ints = x.Slice<0>(p);
- // Span<double> doubles = x.Slice<1>(p);
- //
- // Requires: `N < NumSizes`.
- // Requires: `p` is aligned to `Alignment()`.
- template <size_t N, class Char>
- SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
- return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
- }
-
- // The array with the specified element type. There must be exactly one
- // such array and its zero-based index must be less than `NumSizes`.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()];
- // Span<int> ints = x.Slice<int>(p);
- // Span<double> doubles = x.Slice<double>(p);
- //
- // Requires: `p` is aligned to `Alignment()`.
- template <class T, class Char>
- SliceType<CopyConst<Char, T>> Slice(Char* p) const {
- return Slice<ElementIndex<T>()>(p);
- }
-
- // All arrays with known sizes.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()];
- //
- // Span<int> ints;
- // Span<double> doubles;
- // std::tie(ints, doubles) = x.Slices(p);
- //
- // Requires: `p` is aligned to `Alignment()`.
- //
- // Note: We're not using ElementType alias here because it does not compile
- // under MSVC.
- template <class Char>
- std::tuple<SliceType<CopyConst<
- Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
- Slices(Char* p) const {
- // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
- // in 6.1).
- (void)p;
- return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
- Slice<SizeSeq>(p)...);
- }
-
- // The size of the allocation that fits all arrays.
- //
- // // int[3], 4 bytes of padding, double[4].
- // Layout<int, double> x(3, 4);
- // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
- //
- // Requires: `NumSizes == sizeof...(Ts)`.
- constexpr size_t AllocSize() const {
- static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
- return Offset<NumTypes - 1>() +
+ ElementAlignment<N>::value);
+ }
+
+ // Offset in bytes of the array with the specified element type. There must
+ // be exactly one such array and its zero-based index must be at most
+ // `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+  //   assert(x.Offset<int>() == 0);      // The ints start from 0.
+  //   assert(x.Offset<double>() == 16);  // The doubles start from 16.
+ template <class T>
+ constexpr size_t Offset() const {
+ return Offset<ElementIndex<T>()>();
+ }
+
+ // Offsets in bytes of all arrays for which the offsets are known.
+ constexpr std::array<size_t, NumOffsets> Offsets() const {
+ return {{Offset<OffsetSeq>()...}};
+ }
+
+ // The number of elements in the Nth array. This is the Nth argument of
+ // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<0>() == 3);
+ // assert(x.Size<1>() == 4);
+ //
+ // Requires: `N < NumSizes`.
+ template <size_t N>
+ constexpr size_t Size() const {
+ static_assert(N < NumSizes, "Index out of bounds");
+ return size_[N];
+ }
+
+ // The number of elements in the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be
+ // at most `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<int>() == 3);
+ // assert(x.Size<double>() == 4);
+ template <class T>
+ constexpr size_t Size() const {
+ return Size<ElementIndex<T>()>();
+ }
+
+  // The number of elements in all arrays for which the sizes are known.
+ constexpr std::array<size_t, NumSizes> Sizes() const {
+ return {{Size<SizeSeq>()...}};
+ }
+
+ // Pointer to the beginning of the Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<0>(p);
+ // double* doubles = x.Pointer<1>(p);
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
+ using C = typename std::remove_const<Char>::type;
+ static_assert(
+ std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
+ std::is_same<C, signed char>(),
+ "The argument must be a pointer to [const] [signed|unsigned] char");
+ constexpr size_t alignment = Alignment();
+ (void)alignment;
+ assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
+ return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
+ }
+
+ // Pointer to the beginning of the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be at
+ // most `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<int>(p);
+ // double* doubles = x.Pointer<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ CopyConst<Char, T>* Pointer(Char* p) const {
+ return Pointer<ElementIndex<T>()>(p);
+ }
+
+ // Pointers to all arrays for which pointers are known.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // int* ints;
+ // double* doubles;
+ // std::tie(ints, doubles) = x.Pointers(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<CopyConst<
+ Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
+ Pointers(Char* p) const {
+ return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
+ Pointer<OffsetSeq>(p)...);
+ }
+
+ // The Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<0>(p);
+ // Span<double> doubles = x.Slice<1>(p);
+ //
+ // Requires: `N < NumSizes`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
+ return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
+ }
+
+ // The array with the specified element type. There must be exactly one
+ // such array and its zero-based index must be less than `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<int>(p);
+ // Span<double> doubles = x.Slice<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ SliceType<CopyConst<Char, T>> Slice(Char* p) const {
+ return Slice<ElementIndex<T>()>(p);
+ }
+
+ // All arrays with known sizes.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // Span<int> ints;
+ // Span<double> doubles;
+ // std::tie(ints, doubles) = x.Slices(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<SliceType<CopyConst<
+ Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
+ Slices(Char* p) const {
+ // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
+ // in 6.1).
+ (void)p;
+ return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
+ Slice<SizeSeq>(p)...);
+ }
+
+ // The size of the allocation that fits all arrays.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
+ //
+ // Requires: `NumSizes == sizeof...(Ts)`.
+ constexpr size_t AllocSize() const {
+ static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
+ return Offset<NumTypes - 1>() +
SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
- }
-
- // If built with --config=asan, poisons padding bytes (if any) in the
- // allocation. The pointer must point to a memory block at least
- // `AllocSize()` bytes in length.
- //
- // `Char` must be `[const] [signed|unsigned] char`.
- //
- // Requires: `p` is aligned to `Alignment()`.
- template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
- void PoisonPadding(const Char* p) const {
- Pointer<0>(p); // verify the requirements on `Char` and `p`
- }
-
- template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
- void PoisonPadding(const Char* p) const {
- static_assert(N < NumOffsets, "Index out of bounds");
- (void)p;
+ }
+
+ // If built with --config=asan, poisons padding bytes (if any) in the
+ // allocation. The pointer must point to a memory block at least
+ // `AllocSize()` bytes in length.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ Pointer<0>(p); // verify the requirements on `Char` and `p`
+ }
+
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ (void)p;
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
- PoisonPadding<Char, N - 1>(p);
-    // The `if` is an optimization. It doesn't affect the observable behavior.
- if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
- size_t start =
+ PoisonPadding<Char, N - 1>(p);
+    // The `if` is an optimization. It doesn't affect the observable behavior.
+ if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
+ size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
- ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
- }
-#endif
- }
-
- // Human-readable description of the memory layout. Useful for debugging.
- // Slow.
- //
- // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
- // // by an unknown number of doubles.
- // auto x = Layout<char, int, double>::Partial(5, 3);
- // assert(x.DebugString() ==
- // "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
- //
- // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
- // may be missing depending on the target platform). For example,
- // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
- // int is 4 bytes, and we have 3 of those ints. The size of the last field may
- // be missing (as in the example above). Only fields with known offsets are
- // described. Type names may differ across platforms: one compiler might
- // produce "unsigned*" where another produces "unsigned int *".
- std::string DebugString() const {
- const auto offsets = Offsets();
+ ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
+ }
+#endif
+ }
+
+ // Human-readable description of the memory layout. Useful for debugging.
+ // Slow.
+ //
+ // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+ // // by an unknown number of doubles.
+ // auto x = Layout<char, int, double>::Partial(5, 3);
+ // assert(x.DebugString() ==
+ // "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+ //
+ // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
+ // may be missing depending on the target platform). For example,
+ // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
+ // int is 4 bytes, and we have 3 of those ints. The size of the last field may
+ // be missing (as in the example above). Only fields with known offsets are
+ // described. Type names may differ across platforms: one compiler might
+ // produce "unsigned*" where another produces "unsigned int *".
+ std::string DebugString() const {
+ const auto offsets = Offsets();
const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
- const std::string types[] = {
- adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
- std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
- for (size_t i = 0; i != NumOffsets - 1; ++i) {
- absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
- "(", sizes[i + 1], ")");
- }
- // NumSizes is a constant that may be zero. Some compilers cannot see that
- // inside the if statement "size_[NumSizes - 1]" must be valid.
- int last = static_cast<int>(NumSizes) - 1;
- if (NumTypes == NumSizes && last >= 0) {
- absl::StrAppend(&res, "[", size_[last], "]");
- }
- return res;
- }
-
- private:
- // Arguments of `Layout::Partial()` or `Layout::Layout()`.
- size_t size_[NumSizes > 0 ? NumSizes : 1];
-};
-
-template <size_t NumSizes, class... Ts>
-using LayoutType = LayoutImpl<
- std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
- absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
-
-} // namespace internal_layout
-
-// Descriptor of arrays of various types and sizes laid out in memory one after
-// another. See the top of the file for documentation.
-//
-// Check out the public API of internal_layout::LayoutImpl above. The type is
-// internal to the library but its methods are public, and they are inherited
-// by `Layout`.
-template <class... Ts>
-class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
- public:
- static_assert(sizeof...(Ts) > 0, "At least one field is required");
- static_assert(
- absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
- "Invalid element type (see IsLegalElementType)");
-
- // The result type of `Partial()` with `NumSizes` arguments.
- template <size_t NumSizes>
- using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
-
- // `Layout` knows the element types of the arrays we want to lay out in
- // memory but not the number of elements in each array.
- // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
- // resulting immutable object can be used to obtain pointers to the
- // individual arrays.
- //
- // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
-  // if all you need is the offset of the second array, you only need to
- // pass one argument -- the number of elements in the first array.
- //
- // // int[3] followed by 4 bytes of padding and an unknown number of
- // // doubles.
- // auto x = Layout<int, double>::Partial(3);
- // // doubles start at byte 16.
- // assert(x.Offset<1>() == 16);
- //
- // If you know the number of elements in all arrays, you can still call
- // `Partial()` but it's more convenient to use the constructor of `Layout`.
- //
- // Layout<int, double> x(3, 5);
- //
- // Note: The sizes of the arrays must be specified in number of elements,
- // not in bytes.
- //
- // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
- // Requires: all arguments are convertible to `size_t`.
- template <class... Sizes>
- static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
- static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
- return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
- }
-
- // Creates a layout with the sizes of all arrays specified. If you know
- // only the sizes of the first N arrays (where N can be zero), you can use
- // `Partial()` defined above. The constructor is essentially equivalent to
-  // calling `Partial()` and passing in all array sizes; it is provided as a
-  // convenient abbreviation.
- //
- // Note: The sizes of the arrays must be specified in number of elements,
- // not in bytes.
- constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
- : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
-};
-
-} // namespace container_internal
+ const std::string types[] = {
+ adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+ std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
+ for (size_t i = 0; i != NumOffsets - 1; ++i) {
+ absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
+ "(", sizes[i + 1], ")");
+ }
+ // NumSizes is a constant that may be zero. Some compilers cannot see that
+ // inside the if statement "size_[NumSizes - 1]" must be valid.
+ int last = static_cast<int>(NumSizes) - 1;
+ if (NumTypes == NumSizes && last >= 0) {
+ absl::StrAppend(&res, "[", size_[last], "]");
+ }
+ return res;
+ }
+
+ private:
+ // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+ size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+ std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+ absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+} // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+ static_assert(sizeof...(Ts) > 0, "At least one field is required");
+ static_assert(
+ absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ // The result type of `Partial()` with `NumSizes` arguments.
+ template <size_t NumSizes>
+ using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+ // `Layout` knows the element types of the arrays we want to lay out in
+ // memory but not the number of elements in each array.
+ // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+ // resulting immutable object can be used to obtain pointers to the
+ // individual arrays.
+ //
+ // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+ // pass one argument -- the number of elements in the first array.
+ //
+ // // int[3] followed by 4 bytes of padding and an unknown number of
+ // // doubles.
+ // auto x = Layout<int, double>::Partial(3);
+ // // doubles start at byte 16.
+ // assert(x.Offset<1>() == 16);
+ //
+ // If you know the number of elements in all arrays, you can still call
+ // `Partial()` but it's more convenient to use the constructor of `Layout`.
+ //
+ // Layout<int, double> x(3, 5);
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ //
+ // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+ // Requires: all arguments are convertible to `size_t`.
+ template <class... Sizes>
+ static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+ static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+ return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+ }
+
+ // Creates a layout with the sizes of all arrays specified. If you know
+ // only the sizes of the first N arrays (where N can be zero), you can use
+ // `Partial()` defined above. The constructor is essentially equivalent to
+  // calling `Partial()` and passing in all array sizes; it is provided as a
+  // convenient abbreviation.
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+ : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/node_hash_policy.h b/contrib/restricted/abseil-cpp/absl/container/internal/node_hash_policy.h
index 4617162f0b..bae22abe0a 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/node_hash_policy.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/node_hash_policy.h
@@ -1,92 +1,92 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Adapts a policy for nodes.
-//
-// The node policy should model:
-//
-// struct Policy {
-//   // Returns a new node allocated with the allocator and constructed from
-//   // the specified arguments.
-// template <class Alloc, class... Args>
-// value_type* new_element(Alloc* alloc, Args&&... args) const;
-//
-// // Destroys and deallocates node using the allocator.
-// template <class Alloc>
-// void delete_element(Alloc* alloc, value_type* node) const;
-// };
-//
-// It may also optionally define `value()` and `apply()`. For documentation on
-// these, see hash_policy_traits.h.
-
-#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
-#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
-
-#include <cassert>
-#include <cstddef>
-#include <memory>
-#include <type_traits>
-#include <utility>
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+//   // Returns a new node allocated with the allocator and constructed from
+//   // the specified arguments.
+// template <class Alloc, class... Args>
+// value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+// // Destroys and deallocates node using the allocator.
+// template <class Alloc>
+// void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation on
+// these, see hash_policy_traits.h.
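+//
+// A minimal conforming policy might look like the sketch below. `MyPolicy`
+// and `Node` are hypothetical names; note that `node_hash_policy` invokes
+// these functions as `Policy::new_element(...)`, so defining them as static
+// members works:
+//
+//   struct MyPolicy {
+//     using value_type = Node;
+//
+//     template <class Alloc, class... Args>
+//     static value_type* new_element(Alloc* alloc, Args&&... args) {
+//       using A = typename std::allocator_traits<
+//           Alloc>::template rebind_alloc<value_type>;
+//       A a(*alloc);
+//       value_type* p = std::allocator_traits<A>::allocate(a, 1);
+//       std::allocator_traits<A>::construct(a, p,
+//                                           std::forward<Args>(args)...);
+//       return p;
+//     }
+//
+//     template <class Alloc>
+//     static void delete_element(Alloc* alloc, value_type* node) {
+//       using A = typename std::allocator_traits<
+//           Alloc>::template rebind_alloc<value_type>;
+//       A a(*alloc);
+//       std::allocator_traits<A>::destroy(a, node);
+//       std::allocator_traits<A>::deallocate(a, node, 1);
+//     }
+//   };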
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class Reference, class Policy>
-struct node_hash_policy {
- static_assert(std::is_lvalue_reference<Reference>::value, "");
-
- using slot_type = typename std::remove_cv<
- typename std::remove_reference<Reference>::type>::type*;
-
- template <class Alloc, class... Args>
- static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
- *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
- }
-
- template <class Alloc>
- static void destroy(Alloc* alloc, slot_type* slot) {
- Policy::delete_element(alloc, *slot);
- }
-
- template <class Alloc>
- static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
- *new_slot = *old_slot;
- }
-
- static size_t space_used(const slot_type* slot) {
- if (slot == nullptr) return Policy::element_space_used(nullptr);
- return Policy::element_space_used(*slot);
- }
-
- static Reference element(slot_type* slot) { return **slot; }
-
- template <class T, class P = Policy>
- static auto value(T* elem) -> decltype(P::value(elem)) {
- return P::value(elem);
- }
-
- template <class... Ts, class P = Policy>
- static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
- return P::apply(std::forward<Ts>(ts)...);
- }
-};
-
-} // namespace container_internal
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+ static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+ using slot_type = typename std::remove_cv<
+ typename std::remove_reference<Reference>::type>::type*;
+
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+ }
+
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::delete_element(alloc, *slot);
+ }
+
+ template <class Alloc>
+ static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static size_t space_used(const slot_type* slot) {
+ if (slot == nullptr) return Policy::element_space_used(nullptr);
+ return Policy::element_space_used(*slot);
+ }
+
+ static Reference element(slot_type* slot) { return **slot; }
+
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ template <class... Ts, class P = Policy>
+ static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<Ts>(ts)...);
+ }
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
index c7df2efc62..6802b332f0 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -1,198 +1,198 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
-#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
-
-#include <tuple>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/internal/throw_delegate.h"
-#include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class Policy, class Hash, class Eq, class Alloc>
-class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
- // P is Policy. It's passed as a template argument to support maps that have
- // incomplete types as values, as in unordered_map<K, IncompleteType>.
- // MappedReference<> may be a non-reference type.
- template <class P>
- using MappedReference = decltype(P::value(
- std::addressof(std::declval<typename raw_hash_map::reference>())));
-
- // MappedConstReference<> may be a non-reference type.
- template <class P>
- using MappedConstReference = decltype(P::value(
- std::addressof(std::declval<typename raw_hash_map::const_reference>())));
-
- using KeyArgImpl =
- KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
-
- public:
- using key_type = typename Policy::key_type;
- using mapped_type = typename Policy::mapped_type;
- template <class K>
- using key_arg = typename KeyArgImpl::template type<K, key_type>;
-
- static_assert(!std::is_reference<key_type>::value, "");
+namespace container_internal {
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
+ // P is Policy. It's passed as a template argument to support maps that have
+ // incomplete types as values, as in unordered_map<K, IncompleteType>.
+ // MappedReference<> may be a non-reference type.
+ template <class P>
+ using MappedReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::reference>())));
+
+ // MappedConstReference<> may be a non-reference type.
+ template <class P>
+ using MappedConstReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::const_reference>())));
+
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ static_assert(!std::is_reference<key_type>::value, "");
// TODO(b/187807849): Evaluate whether to support reference mapped_type and
// remove this assertion if/when it is supported.
- static_assert(!std::is_reference<mapped_type>::value, "");
-
- using iterator = typename raw_hash_map::raw_hash_set::iterator;
- using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
-
- raw_hash_map() {}
- using raw_hash_map::raw_hash_set::raw_hash_set;
-
- // The last two template parameters ensure that both arguments are rvalues
- // (lvalue arguments are handled by the overloads below). This is necessary
- // for supporting bitfield arguments.
- //
- // union { int n : 1; };
- // flat_hash_map<int, int> m;
- // m.insert_or_assign(n, n);
- template <class K = key_type, class V = mapped_type, K* = nullptr,
- V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
- return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
- }
-
- template <class K = key_type, class V = mapped_type, K* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
- return insert_or_assign_impl(std::forward<K>(k), v);
- }
-
- template <class K = key_type, class V = mapped_type, V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
- return insert_or_assign_impl(k, std::forward<V>(v));
- }
-
- template <class K = key_type, class V = mapped_type>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
- return insert_or_assign_impl(k, v);
- }
-
- template <class K = key_type, class V = mapped_type, K* = nullptr,
- V* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
- return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
- }
-
- template <class K = key_type, class V = mapped_type, K* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
- return insert_or_assign(std::forward<K>(k), v).first;
- }
-
- template <class K = key_type, class V = mapped_type, V* = nullptr>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
- return insert_or_assign(k, std::forward<V>(v)).first;
- }
-
- template <class K = key_type, class V = mapped_type>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
- return insert_or_assign(k, v).first;
- }
-
- // All `try_emplace()` overloads make the same guarantees regarding rvalue
- // arguments as `std::unordered_map::try_emplace()`, namely that these
- // functions will not move from rvalue arguments if insertions do not happen.
- template <class K = key_type, class... Args,
- typename std::enable_if<
- !std::is_convertible<K, const_iterator>::value, int>::type = 0,
- K* = nullptr>
- std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
- return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
- }
-
- template <class K = key_type, class... Args,
- typename std::enable_if<
- !std::is_convertible<K, const_iterator>::value, int>::type = 0>
- std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
- return try_emplace_impl(k, std::forward<Args>(args)...);
- }
-
- template <class K = key_type, class... Args, K* = nullptr>
- iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
- return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
- }
-
- template <class K = key_type, class... Args>
- iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
- return try_emplace(k, std::forward<Args>(args)...).first;
- }
-
- template <class K = key_type, class P = Policy>
- MappedReference<P> at(const key_arg<K>& key) {
- auto it = this->find(key);
- if (it == this->end()) {
- base_internal::ThrowStdOutOfRange(
- "absl::container_internal::raw_hash_map<>::at");
- }
- return Policy::value(&*it);
- }
-
- template <class K = key_type, class P = Policy>
- MappedConstReference<P> at(const key_arg<K>& key) const {
- auto it = this->find(key);
- if (it == this->end()) {
- base_internal::ThrowStdOutOfRange(
- "absl::container_internal::raw_hash_map<>::at");
- }
- return Policy::value(&*it);
- }
-
- template <class K = key_type, class P = Policy, K* = nullptr>
- MappedReference<P> operator[](key_arg<K>&& key) {
- return Policy::value(&*try_emplace(std::forward<K>(key)).first);
- }
-
- template <class K = key_type, class P = Policy>
- MappedReference<P> operator[](const key_arg<K>& key) {
- return Policy::value(&*try_emplace(key).first);
- }
-
- private:
- template <class K, class V>
- std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
- auto res = this->find_or_prepare_insert(k);
- if (res.second)
- this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
- else
- Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
- return {this->iterator_at(res.first), res.second};
- }
-
- template <class K = key_type, class... Args>
- std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
- auto res = this->find_or_prepare_insert(k);
- if (res.second)
- this->emplace_at(res.first, std::piecewise_construct,
- std::forward_as_tuple(std::forward<K>(k)),
- std::forward_as_tuple(std::forward<Args>(args)...));
- return {this->iterator_at(res.first), res.second};
- }
-};
-
-} // namespace container_internal
+ static_assert(!std::is_reference<mapped_type>::value, "");
+
+ using iterator = typename raw_hash_map::raw_hash_set::iterator;
+ using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
+
+ raw_hash_map() {}
+ using raw_hash_map::raw_hash_set::raw_hash_set;
+
+ // The last two template parameters ensure that both arguments are rvalues
+ // (lvalue arguments are handled by the overloads below). This is necessary
+ // for supporting bitfield arguments.
+ //
+ // union { int n : 1; };
+ // flat_hash_map<int, int> m;
+ // m.insert_or_assign(n, n);
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+ return insert_or_assign_impl(std::forward<K>(k), v);
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+ return insert_or_assign_impl(k, std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+ return insert_or_assign_impl(k, v);
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+ return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+ return insert_or_assign(std::forward<K>(k), v).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+ return insert_or_assign(k, std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+ return insert_or_assign(k, v).first;
+ }
+
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
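+  //
+  // For example (an illustrative sketch):
+  //
+  //   absl::flat_hash_map<int, std::string> m;
+  //   m.try_emplace(1, "a");
+  //   std::string v = "b";
+  //   m.try_emplace(1, std::move(v));  // Key 1 already exists, so `v` is
+  //                                    // not moved from.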
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0,
+ K* = nullptr>
+ std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args, K* = nullptr>
+ iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+ return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
+ }
+
+ template <class K = key_type, class... Args>
+ iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+ return try_emplace(k, std::forward<Args>(args)...).first;
+ }
+
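The no-move guarantee documented above is observable: when the key is already present, an rvalue mapped-type argument must be left intact. A sketch, again assuming the absl::flat_hash_map wrapper:

  #include <cassert>
  #include <memory>
  #include "absl/container/flat_hash_map.h"

  int main() {
    absl::flat_hash_map<int, std::unique_ptr<int>> m;
    m.try_emplace(1, std::make_unique<int>(10));
    auto p = std::make_unique<int>(20);
    m.try_emplace(1, std::move(p));  // key 1 exists, so no insertion happens
    assert(p != nullptr);            // ...and p was not moved from
    assert(*m.at(1) == 10);
    return 0;
  }
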
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> at(const key_arg<K>& key) {
+ auto it = this->find(key);
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedConstReference<P> at(const key_arg<K>& key) const {
+ auto it = this->find(key);
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy, K* = nullptr>
+ MappedReference<P> operator[](key_arg<K>&& key) {
+ return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> operator[](const key_arg<K>& key) {
+ return Policy::value(&*try_emplace(key).first);
+ }
+
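The two accessors differ on a missing key: at() reports it by throwing (ThrowStdOutOfRange raises std::out_of_range), while operator[] default-constructs the mapped value via try_emplace(). A brief sketch with the same assumed wrapper:

  #include <cassert>
  #include <stdexcept>
  #include "absl/container/flat_hash_map.h"

  int main() {
    absl::flat_hash_map<int, int> m;
    m[1] = 5;                 // inserts a default-constructed int, then assigns
    assert(m.at(1) == 5);
    bool threw = false;
    try {
      m.at(2);                // key 2 is absent
    } catch (const std::out_of_range&) {
      threw = true;
    }
    assert(threw);
    return 0;
  }
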
+ private:
+ template <class K, class V>
+ std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
+ else
+ Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
+ return {this->iterator_at(res.first), res.second};
+ }
+
+ template <class K = key_type, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ return {this->iterator_at(res.first), res.second};
+ }
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 687bcb8a4d..17dca0df31 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -1,54 +1,54 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/container/internal/raw_hash_set.h"
-
-#include <atomic>
-#include <cstddef>
-
-#include "absl/base/config.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <atomic>
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
+namespace container_internal {
+
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
-constexpr size_t Group::kWidth;
-
-// Returns "random" seed.
-inline size_t RandomSeed() {
+constexpr size_t Group::kWidth;
+
+// Returns "random" seed.
+inline size_t RandomSeed() {
#ifdef ABSL_HAVE_THREAD_LOCAL
- static thread_local size_t counter = 0;
- size_t value = ++counter;
-#else // ABSL_HAVE_THREAD_LOCAL
- static std::atomic<size_t> counter(0);
- size_t value = counter.fetch_add(1, std::memory_order_relaxed);
-#endif // ABSL_HAVE_THREAD_LOCAL
- return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
-}
-
+ static thread_local size_t counter = 0;
+ size_t value = ++counter;
+#else // ABSL_HAVE_THREAD_LOCAL
+ static std::atomic<size_t> counter(0);
+ size_t value = counter.fetch_add(1, std::memory_order_relaxed);
+#endif // ABSL_HAVE_THREAD_LOCAL
+ return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
+}
+
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
- // To avoid problems with weak hashes and single bit tests, we use % 13.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
-}
-
+ // To avoid problems with weak hashes and single bit tests, we use % 13.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == ctrl_t::kSentinel);
assert(IsValidCapacity(capacity));
@@ -62,6 +62,6 @@ void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
// Extern template instantiation for inline function.
template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-} // namespace container_internal
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
index 12682b3532..1364f8cca7 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -1,92 +1,92 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// An open-addressing hashtable with quadratic probing.
-//
-// This is a low level hashtable on top of which different interfaces can be
-// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
-//
-// The table interface is similar to that of std::unordered_set. Notable
-// differences are that most member functions support heterogeneous keys when
-// BOTH the hash and eq functions are marked as transparent. They do so by
-// providing a typedef called `is_transparent`.
-//
-// When heterogeneous lookup is enabled, functions that take key_type act as if
-// they have an overload set like:
-//
-// iterator find(const key_type& key);
-// template <class K>
-// iterator find(const K& key);
-//
-// size_type erase(const key_type& key);
-// template <class K>
-// size_type erase(const K& key);
-//
-// std::pair<iterator, iterator> equal_range(const key_type& key);
-// template <class K>
-// std::pair<iterator, iterator> equal_range(const K& key);
-//
-// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
-// exist.
-//
-// find() also supports passing the hash explicitly:
-//
-// iterator find(const key_type& key, size_t hash);
-// template <class U>
-// iterator find(const U& key, size_t hash);
-//
-// In addition the pointer to element and iterator stability guarantees are
-// weaker: all iterators and pointers are invalidated after a new element is
-// inserted.
-//
-// IMPLEMENTATION DETAILS
-//
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
-//
-// Group 1 Group 2 Group 3
-// +---------------+---------------+---------------+
-// | | | | | | | | | | | | | | | | | | | | | | | | |
-// +---------------+---------------+---------------+
-//
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16), in almost all cases a match in H2 is also a lookup hit.
-//
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
-//
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
-//
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N, the number of groups,
-// the probing function for the i'th probe is:
-//
-// P(0) = H1 % N
-//
-// P(i) = (P(i - 1) + i) % N
-//
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An open-addressing hashtable with quadratic probing.
+//
+// This is a low level hashtable on top of which different interfaces can be
+// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
+//
+// The table interface is similar to that of std::unordered_set. Notable
+// differences are that most member functions support heterogeneous keys when
+// BOTH the hash and eq functions are marked as transparent. They do so by
+// providing a typedef called `is_transparent`.
+//
+// When heterogeneous lookup is enabled, functions that take key_type act as if
+// they have an overload set like:
+//
+// iterator find(const key_type& key);
+// template <class K>
+// iterator find(const K& key);
+//
+// size_type erase(const key_type& key);
+// template <class K>
+// size_type erase(const K& key);
+//
+// std::pair<iterator, iterator> equal_range(const key_type& key);
+// template <class K>
+// std::pair<iterator, iterator> equal_range(const K& key);
+//
+// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
+// exist.
+//
+// find() also supports passing the hash explicitly:
+//
+// iterator find(const key_type& key, size_t hash);
+// template <class U>
+// iterator find(const U& key, size_t hash);
+//
+// In addition the pointer to element and iterator stability guarantees are
+// weaker: all iterators and pointers are invalidated after a new element is
+// inserted.
+//
+// IMPLEMENTATION DETAILS
+//
+// The table stores elements inline in a slot array. In addition to the slot
+// array the table maintains some control state per slot. The extra state is one
+// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
+// the hash of an occupied slot. The table is split into logical groups of
+// slots, like so:
+//
+// Group 1 Group 2 Group 3
+// +---------------+---------------+---------------+
+// | | | | | | | | | | | | | | | | | | | | | | | | |
+// +---------------+---------------+---------------+
+//
+// On lookup the hash is split into two parts:
+// - H2: 7 bits (those stored in the control bytes)
+// - H1: the rest of the bits
+// The groups are probed using H1. For each group the slots are matched to H2 in
+// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
+// is low (8 or 16), in almost all cases a match in H2 is also a lookup hit.
+//
+// On insert, once the right group is found (as in lookup), its slots are
+// filled in order.
+//
+// On erase a slot is cleared. In case the group did not have any empty slots
+// before the erase, the erased slot is marked as deleted.
+//
+// Groups without empty slots (but maybe with deleted slots) extend the probe
+// sequence. The probing algorithm is quadratic. Given N, the number of groups,
+// the probing function for the i'th probe is:
+//
+// P(0) = H1 % N
+//
+// P(i) = (P(i - 1) + i) % N
+//
+// This probing function guarantees that after N probes, all the groups of the
+// table will be probed exactly once.
//
// The control state and slot array are stored contiguously in a shared heap
// allocation. The layout of this allocation is: `capacity()` control bytes,
@@ -98,40 +98,40 @@
// which there are more than `capacity()` cloned control bytes, the extra bytes
// are `kEmpty`, and these ensure that we always see at least one empty slot and
// can stop an unsuccessful search.
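As an illustration of the heterogeneous-lookup contract described above: the default hash and equality functors used by the public string-keyed containers are transparent, so a lookup can avoid materializing a temporary key_type. A sketch (assuming absl::flat_hash_set with its default functors):

  #include <cassert>
  #include <string>
  #include "absl/container/flat_hash_set.h"
  #include "absl/strings/string_view.h"

  int main() {
    absl::flat_hash_set<std::string> s = {"abc", "def"};
    absl::string_view key = "abc";
    // Transparent Hash/Eq enable find(string_view) with no std::string
    // temporary being constructed for the lookup.
    assert(s.find(key) != s.end());
    assert(s.count("def") == 1);
    return 0;
  }
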
-
-#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
-#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
-
-#include <algorithm>
-#include <cmath>
-#include <cstdint>
-#include <cstring>
-#include <iterator>
-#include <limits>
-#include <memory>
-#include <tuple>
-#include <type_traits>
-#include <utility>
-
-#include "absl/base/internal/endian.h"
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/endian.h"
#include "absl/base/optimization.h"
-#include "absl/base/port.h"
-#include "absl/container/internal/common.h"
-#include "absl/container/internal/compressed_tuple.h"
-#include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/hash_policy_traits.h"
-#include "absl/container/internal/hashtable_debug_hooks.h"
-#include "absl/container/internal/hashtablez_sampler.h"
-#include "absl/container/internal/have_sse.h"
-#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
-#include "absl/utility/utility.h"
-
-namespace absl {
+#include "absl/utility/utility.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
+namespace container_internal {
+
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
std::true_type /* propagate_on_container_swap */) {
@@ -142,141 +142,141 @@ template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
-template <size_t Width>
-class probe_seq {
- public:
- probe_seq(size_t hash, size_t mask) {
- assert(((mask + 1) & mask) == 0 && "not a mask");
- mask_ = mask;
- offset_ = hash & mask_;
- }
- size_t offset() const { return offset_; }
- size_t offset(size_t i) const { return (offset_ + i) & mask_; }
-
- void next() {
- index_ += Width;
- offset_ += index_;
- offset_ &= mask_;
- }
- // 0-based probe index. The i-th probe in the probe sequence.
- size_t index() const { return index_; }
-
- private:
- size_t mask_;
- size_t offset_;
- size_t index_ = 0;
-};
-
-template <class ContainerKey, class Hash, class Eq>
-struct RequireUsableKey {
- template <class PassedKey, class... Args>
- std::pair<
- decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
- decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
- std::declval<const PassedKey&>()))>*
- operator()(const PassedKey&, const Args&...) const;
-};
-
-template <class E, class Policy, class Hash, class Eq, class... Ts>
-struct IsDecomposable : std::false_type {};
-
-template <class Policy, class Hash, class Eq, class... Ts>
-struct IsDecomposable<
- absl::void_t<decltype(
- Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
- Policy, Hash, Eq, Ts...> : std::true_type {};
-
-// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
-template <class T>
+template <size_t Width>
+class probe_seq {
+ public:
+ probe_seq(size_t hash, size_t mask) {
+ assert(((mask + 1) & mask) == 0 && "not a mask");
+ mask_ = mask;
+ offset_ = hash & mask_;
+ }
+ size_t offset() const { return offset_; }
+ size_t offset(size_t i) const { return (offset_ + i) & mask_; }
+
+ void next() {
+ index_ += Width;
+ offset_ += index_;
+ offset_ &= mask_;
+ }
+ // 0-based probe index. The i-th probe in the probe sequence.
+ size_t index() const { return index_; }
+
+ private:
+ size_t mask_;
+ size_t offset_;
+ size_t index_ = 0;
+};
+
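A quick sanity check of the schedule implemented by next(): because the capacity mask is a power of two minus one and the step grows by Width each probe, the sequence lands in every Width-aligned window exactly once before repeating. A standalone sketch of the same arithmetic (no Abseil dependency; the mask and starting hash are arbitrary illustrative values):

  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t kWidth = 16;       // slots per group (SSE group width)
    const size_t mask = 127;        // capacity 2^7 - 1, i.e. 8 groups of 16
    size_t offset = 75 & mask;      // "hash" 75, arbitrary
    size_t index = 0;
    for (int probe = 0; probe < 8; ++probe) {
      // Prints the group index for each probe: 4 5 7 2 6 3 1 0,
      // i.e. all 8 groups, each exactly once.
      std::printf("probe %d -> group %zu\n", probe, offset / kWidth);
      index += kWidth;              // quadratic step, as in next()
      offset = (offset + index) & mask;
    }
    return 0;
  }
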
+template <class ContainerKey, class Hash, class Eq>
+struct RequireUsableKey {
+ template <class PassedKey, class... Args>
+ std::pair<
+ decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
+ decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
+ std::declval<const PassedKey&>()))>*
+ operator()(const PassedKey&, const Args&...) const;
+};
+
+template <class E, class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable : std::false_type {};
+
+template <class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable<
+ absl::void_t<decltype(
+ Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
+ Policy, Hash, Eq, Ts...> : std::true_type {};
+
+// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
+template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
- using std::swap;
- return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
-}
+ using std::swap;
+ return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
return false;
}
-
-template <typename T>
+
+template <typename T>
uint32_t TrailingZeros(T x) {
ABSL_INTERNAL_ASSUME(x != 0);
return countr_zero(x);
-}
-
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
-// this is a true bitmask. On non-SSE platforms, the arithmetic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
-// either 0x00 or 0x80.
-//
-// For example:
-// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
-// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
-template <class T, int SignificantBits, int Shift = 0>
-class BitMask {
- static_assert(std::is_unsigned<T>::value, "");
- static_assert(Shift == 0 || Shift == 3, "");
-
- public:
- // These are useful for unit tests (gunit).
- using value_type = int;
- using iterator = BitMask;
- using const_iterator = BitMask;
-
- explicit BitMask(T mask) : mask_(mask) {}
- BitMask& operator++() {
- mask_ &= (mask_ - 1);
- return *this;
- }
- explicit operator bool() const { return mask_ != 0; }
- int operator*() const { return LowestBitSet(); }
+}
+
+// An abstraction over a bitmask. It provides an easy way to iterate through the
+// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask. On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ // These are useful for unit tests (gunit).
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ explicit BitMask(T mask) : mask_(mask) {}
+ BitMask& operator++() {
+ mask_ &= (mask_ - 1);
+ return *this;
+ }
+ explicit operator bool() const { return mask_ != 0; }
+ int operator*() const { return LowestBitSet(); }
uint32_t LowestBitSet() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
uint32_t HighestBitSet() const {
return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
- }
-
- BitMask begin() const { return *this; }
- BitMask end() const { return BitMask(0); }
-
+ }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
uint32_t TrailingZeros() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
-
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
uint32_t LeadingZeros() const {
- constexpr int total_significant_bits = SignificantBits << Shift;
- constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
return countl_zero(mask_ << extra_bits) >> Shift;
- }
-
- private:
- friend bool operator==(const BitMask& a, const BitMask& b) {
- return a.mask_ == b.mask_;
- }
- friend bool operator!=(const BitMask& a, const BitMask& b) {
- return a.mask_ != b.mask_;
- }
-
- T mask_;
-};
-
-using h2_t = uint8_t;
-
-// The values here are selected for maximum performance. See the static asserts
+ }
+
+ private:
+ friend bool operator==(const BitMask& a, const BitMask& b) {
+ return a.mask_ == b.mask_;
+ }
+ friend bool operator!=(const BitMask& a, const BitMask& b) {
+ return a.mask_ != b.mask_;
+ }
+
+ T mask_;
+};
+
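The two examples in the comment above are easy to verify by hand; a minimal sketch of the Shift=0 case using plain bit arithmetic (assuming C++20 <bit>; the Shift=3 case additionally divides every position by 8, turning byte offsets back into slot indexes):

  #include <bit>
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t mask = 0x5;  // 0b101: bits 0 and 2 are set
    while (mask != 0) {
      // countr_zero finds the lowest set bit, as LowestBitSet() does.
      std::printf("%d\n", std::countr_zero(mask));  // prints 0, then 2
      mask &= mask - 1;   // clear the lowest set bit, as operator++ does
    }
    return 0;
  }
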
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
// below for details. We use an enum class so that when strict aliasing is
// enabled, the compiler knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
- kEmpty = -128, // 0b10000000
- kDeleted = -2, // 0b11111110
- kSentinel = -1, // 0b11111111
-};
-static_assert(
+ kEmpty = -128, // 0b10000000
+ kDeleted = -2, // 0b11111110
+ kSentinel = -1, // 0b11111111
+};
+static_assert(
(static_cast<int8_t>(ctrl_t::kEmpty) &
static_cast<int8_t>(ctrl_t::kDeleted) &
static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
- "Special markers need to have the MSB to make checking for them efficient");
+ "Special markers need to have the MSB to make checking for them efficient");
static_assert(
ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
"ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
@@ -287,7 +287,7 @@ static_assert(
"registers (pcmpeqd xmm, xmm)");
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
"ctrl_t::kEmpty must be -128 to make the SIMD check for its "
- "existence efficient (psignb xmm, xmm)");
+ "existence efficient (psignb xmm, xmm)");
static_assert(
(~static_cast<int8_t>(ctrl_t::kEmpty) &
~static_cast<int8_t>(ctrl_t::kDeleted) &
@@ -297,196 +297,196 @@ static_assert(
"MatchEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
"ctrl_t::kDeleted must be -2 to make the implementation of "
- "ConvertSpecialToEmptyAndFullToDeleted efficient");
-
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
+ "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
ABSL_DLL extern const ctrl_t kEmptyGroup[16];
-inline ctrl_t* EmptyGroup() {
+inline ctrl_t* EmptyGroup() {
return const_cast<ctrl_t*>(kEmptyGroup);
-}
-
-// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
-// randomize insertion order within groups.
+}
+
+// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
+// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
-
-// Returns a hash seed.
-//
-// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
-// non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
- // The low bits of the pointer have little or no entropy because of
- // alignment. We shift the pointer to try to use higher entropy bits. A
- // good number seems to be 12 bits, because that aligns with page size.
- return reinterpret_cast<uintptr_t>(ctrl) >> 12;
-}
-
-inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ HashSeed(ctrl);
-}
+
+// Returns a hash seed.
+//
+// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
+// non-determinism of iteration order in most cases.
+inline size_t HashSeed(const ctrl_t* ctrl) {
+ // The low bits of the pointer have little or no entropy because of
+ // alignment. We shift the pointer to try to use higher entropy bits. A
+ // good number seems to be 12 bits, because that aligns with page size.
+ return reinterpret_cast<uintptr_t>(ctrl) >> 12;
+}
+
+inline size_t H1(size_t hash, const ctrl_t* ctrl) {
+ return (hash >> 7) ^ HashSeed(ctrl);
+}
inline h2_t H2(size_t hash) { return hash & 0x7F; }
-
+
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
-
+
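A worked example of the split, leaving out the per-table seed that HashSeed() mixes into H1:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  int main() {
    size_t hash = 0x12345678;
    size_t h1 = hash >> 7;     // probe position (seed omitted here)
    uint8_t h2 = hash & 0x7F;  // 7 bits stored in the control byte
    std::printf("H1=0x%zx H2=0x%x\n", h1,
                static_cast<unsigned>(h2));  // H1=0x2468ac H2=0x78
    return 0;
  }
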
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-
-// https://github.com/abseil/abseil-cpp/issues/209
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
-// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
-// Work around this by using the portable implementation of Group
-// when using -funsigned-char under GCC.
-inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
-#if defined(__GNUC__) && !defined(__clang__)
- if (std::is_unsigned<char>::value) {
- const __m128i mask = _mm_set1_epi8(0x80);
- const __m128i diff = _mm_subs_epi8(b, a);
- return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
- }
-#endif
- return _mm_cmpgt_epi8(a, b);
-}
-
-struct GroupSse2Impl {
- static constexpr size_t kWidth = 16; // the number of slots per group
-
- explicit GroupSse2Impl(const ctrl_t* pos) {
- ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
- }
-
- // Returns a bitmask representing the positions of slots that match hash.
- BitMask<uint32_t, kWidth> Match(h2_t hash) const {
- auto match = _mm_set1_epi8(hash);
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
- }
-
- // Returns a bitmask representing the positions of empty slots.
- BitMask<uint32_t, kWidth> MatchEmpty() const {
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+ if (std::is_unsigned<char>::value) {
+ const __m128i mask = _mm_set1_epi8(0x80);
+ const __m128i diff = _mm_subs_epi8(b, a);
+ return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+ }
+#endif
+ return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+
+ explicit GroupSse2Impl(const ctrl_t* pos) {
+ ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+ }
+
+ // Returns a bitmask representing the positions of slots that match hash.
+ BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ auto match = _mm_set1_epi8(hash);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint32_t, kWidth> MatchEmpty() const {
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
// This only works because ctrl_t::kEmpty is -128.
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
-#else
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
return Match(static_cast<h2_t>(ctrl_t::kEmpty));
-#endif
- }
-
- // Returns a bitmask representing the positions of empty or deleted slots.
- BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+#endif
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
- }
-
- // Returns the number of trailing empty or deleted elements in the group.
- uint32_t CountLeadingEmptyOrDeleted() const {
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ }
+
+ // Returns the number of trailing empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
return TrailingZeros(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
- }
-
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- auto msbs = _mm_set1_epi8(static_cast<char>(-128));
- auto x126 = _mm_set1_epi8(126);
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+ auto x126 = _mm_set1_epi8(126);
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
- auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
-#else
- auto zero = _mm_setzero_si128();
- auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
- auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
-#endif
- _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
- }
-
- __m128i ctrl;
-};
+ auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+ auto zero = _mm_setzero_si128();
+ auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+ auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+ }
+
+ __m128i ctrl;
+};
#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-
-struct GroupPortableImpl {
- static constexpr size_t kWidth = 8;
-
- explicit GroupPortableImpl(const ctrl_t* pos)
- : ctrl(little_endian::Load64(pos)) {}
-
- BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
- // For the technique, see:
- // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
- // (Determine if a word has a byte equal to n).
- //
- // Caveat: there are false positives but:
- // - they only occur if there is a real match
+
+struct GroupPortableImpl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupPortableImpl(const ctrl_t* pos)
+ : ctrl(little_endian::Load64(pos)) {}
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ // For the technique, see:
+ // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+ // (Determine if a word has a byte equal to n).
+ //
+ // Caveat: there are false positives but:
+ // - they only occur if there is a real match
// - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
- // - they will be handled gracefully by subsequent checks in code
- //
- // Example:
- // v = 0x1716151413121110
- // hash = 0x12
- // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl ^ (lsbs * hash);
- return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
- }
-
- BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
- }
-
- BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
- }
-
- uint32_t CountLeadingEmptyOrDeleted() const {
- constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
- return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
- }
-
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl & msbs;
- auto res = (~x + (x >> 7)) & ~lsbs;
- little_endian::Store64(dst, res);
- }
-
- uint64_t ctrl;
-};
-
+ // - they will be handled gracefully by subsequent checks in code
+ //
+ // Example:
+ // v = 0x1716151413121110
+ // hash = 0x12
+ // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+ return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint64_t ctrl;
+};
+
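The example in the Match() comment can be reproduced standalone. A sketch of the byte-equality bit trick on the same values (note byte 3, ctrl 0x13, is one of the documented false positives):

  #include <cstdint>
  #include <cstdio>

  int main() {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    uint64_t v = 0x1716151413121110ULL;  // control bytes 0x10 .. 0x17
    uint8_t hash = 0x12;
    uint64_t x = v ^ (lsbs * hash);      // zero byte wherever ctrl == hash
    uint64_t match = (x - lsbs) & ~x & msbs;
    // Prints 0000000080800000: the true match in byte 2 plus a false
    // positive in byte 3, resolved by the subsequent key comparison.
    std::printf("%016llx\n", static_cast<unsigned long long>(match));
    return 0;
  }
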
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-using Group = GroupSse2Impl;
-#else
-using Group = GroupPortableImpl;
-#endif
-
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
// The number of cloned control bytes that we copy from the beginning to the
// end of the control bytes array.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
-template <class Policy, class Hash, class Eq, class Alloc>
-class raw_hash_set;
-
-inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-
-// PRECONDITION:
-// IsValidCapacity(capacity)
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// PRECONDITION:
+// IsValidCapacity(capacity)
// ctrl[capacity] == ctrl_t::kSentinel
// ctrl[i] != ctrl_t::kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-// DELETED -> EMPTY
-// EMPTY -> EMPTY
-// FULL -> DELETED
+// Applies mapping for every byte in ctrl:
+// DELETED -> EMPTY
+// EMPTY -> EMPTY
+// FULL -> DELETED
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
-
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
-inline size_t NormalizeCapacity(size_t n) {
+
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+inline size_t NormalizeCapacity(size_t n) {
return n ? ~size_t{} >> countl_zero(n) : 1;
-}
-
+}
+
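For example, this maps 0 -> 1, 5 -> 7, and 8 -> 15; the result is always of the form 2^k - 1, which is exactly what IsValidCapacity() accepts. A quick check, assuming C++20 <bit> for countl_zero:

  #include <bit>
  #include <cstddef>
  #include <cstdio>

  size_t NormalizeCapacity(size_t n) {
    return n ? ~size_t{} >> std::countl_zero(n) : 1;
  }

  int main() {
    // Prints "1 7 15": each input rounded up to the next 2^k - 1.
    std::printf("%zu %zu %zu\n", NormalizeCapacity(0), NormalizeCapacity(5),
                NormalizeCapacity(8));
    return 0;
  }
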
// General notes on capacity/growth methods below:
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
// average of two empty slots per group.
@@ -497,26 +497,26 @@ inline size_t NormalizeCapacity(size_t n) {
// Given `capacity` of the table, returns the size (i.e. number of full slots)
// at which we should grow the capacity.
-inline size_t CapacityToGrowth(size_t capacity) {
- assert(IsValidCapacity(capacity));
- // `capacity*7/8`
- if (Group::kWidth == 8 && capacity == 7) {
- // x-x/8 does not work when x==7.
- return 6;
- }
- return capacity - capacity / 8;
-}
-// From desired "growth" to a lowerbound of the necessary capacity.
+inline size_t CapacityToGrowth(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ // `capacity*7/8`
+ if (Group::kWidth == 8 && capacity == 7) {
+ // x-x/8 does not work when x==7.
+ return 6;
+ }
+ return capacity - capacity / 8;
+}
+// From desired "growth" to a lowerbound of the necessary capacity.
// Might not be a valid one and requires NormalizeCapacity().
-inline size_t GrowthToLowerboundCapacity(size_t growth) {
- // `growth*8/7`
- if (Group::kWidth == 8 && growth == 7) {
- // x+(x-1)/7 does not work when x==7.
- return 8;
- }
- return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
-}
-
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+ // `growth*8/7`
+ if (Group::kWidth == 8 && growth == 7) {
+ // x+(x-1)/7 does not work when x==7.
+ return 8;
+ }
+ return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
+
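Away from the width-8 special cases, the two functions are near-inverses under the 7/8 maximum load factor. A sketch of the arithmetic for a 16-wide group:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  size_t CapacityToGrowth(size_t capacity) {  // capacity * 7/8
    return capacity - capacity / 8;
  }
  size_t GrowthToLowerboundCapacity(size_t growth) {  // growth * 8/7
    return growth +
           static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
  }

  int main() {
    assert(CapacityToGrowth(15) == 14);            // 15 - 15/8
    assert(GrowthToLowerboundCapacity(14) == 15);  // 14 + 13/7
    assert(CapacityToGrowth(127) == 112);
    assert(GrowthToLowerboundCapacity(112) == 127);
    return 0;
  }
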
template <class InputIter>
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
size_t bucket_count) {
@@ -655,589 +655,589 @@ inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
return SlotOffset(capacity, slot_align) + capacity * slot_size;
}
-// Policy: a policy defines how to perform different operations on
-// the slots of the hashtable (see hash_policy_traits.h for the full interface
-// of policy).
-//
-// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
-// functor should accept a key and return size_t as hash. For best performance
-// it is important that the hash function provides high entropy across all bits
-// of the hash.
-//
-// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
-// should accept two keys (of possibly different types) and return a bool: true
-// if they are equal, false if they are not. If two keys compare equal, then
-// their hash values as defined by Hash MUST be equal.
-//
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
+// functor should accept a key and return size_t as hash. For best performance
+// it is important that the hash function provides high entropy across all bits
+// of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
+// should accept two keys (of possibly different types) and return a bool: true
+// if they are equal, false if they are not. If two keys compare equal, then
+// their hash values as defined by Hash MUST be equal.
+//
// Allocator: an Allocator
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
-// the storage of the hashtable will be allocated and the elements will be
-// constructed and destroyed.
-template <class Policy, class Hash, class Eq, class Alloc>
-class raw_hash_set {
- using PolicyTraits = hash_policy_traits<Policy>;
- using KeyArgImpl =
- KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
-
- public:
- using init_type = typename PolicyTraits::init_type;
- using key_type = typename PolicyTraits::key_type;
- // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
- // code fixes!
- using slot_type = typename PolicyTraits::slot_type;
- using allocator_type = Alloc;
- using size_type = size_t;
- using difference_type = ptrdiff_t;
- using hasher = Hash;
- using key_equal = Eq;
- using policy_type = Policy;
- using value_type = typename PolicyTraits::value_type;
- using reference = value_type&;
- using const_reference = const value_type&;
- using pointer = typename absl::allocator_traits<
- allocator_type>::template rebind_traits<value_type>::pointer;
- using const_pointer = typename absl::allocator_traits<
- allocator_type>::template rebind_traits<value_type>::const_pointer;
-
- // Alias used for heterogeneous lookup functions.
- // `key_arg<K>` evaluates to `K` when the functors are transparent and to
- // `key_type` otherwise. It permits template argument deduction on `K` for the
- // transparent case.
- template <class K>
- using key_arg = typename KeyArgImpl::template type<K, key_type>;
-
- private:
- // Give an early error when key_type is not hashable/eq.
- auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
- auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
-
- using AllocTraits = absl::allocator_traits<allocator_type>;
- using SlotAlloc = typename absl::allocator_traits<
- allocator_type>::template rebind_alloc<slot_type>;
- using SlotAllocTraits = typename absl::allocator_traits<
- allocator_type>::template rebind_traits<slot_type>;
-
- static_assert(std::is_lvalue_reference<reference>::value,
- "Policy::element() must return a reference");
-
- template <typename T>
- struct SameAsElementReference
- : std::is_same<typename std::remove_cv<
- typename std::remove_reference<reference>::type>::type,
- typename std::remove_cv<
- typename std::remove_reference<T>::type>::type> {};
-
- // An enabler for insert(T&&): T must be convertible to init_type or be the
- // same as [cv] value_type [ref].
- // Note: we separate SameAsElementReference into its own type to avoid using
- // reference unless we need to. MSVC doesn't seem to like it in some
- // cases.
- template <class T>
- using RequiresInsertable = typename std::enable_if<
- absl::disjunction<std::is_convertible<T, init_type>,
- SameAsElementReference<T>>::value,
- int>::type;
-
- // RequiresNotInit is a workaround for gcc prior to 7.1.
- // See https://godbolt.org/g/Y4xsUh.
- template <class T>
- using RequiresNotInit =
- typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
-
- template <class... Ts>
- using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
-
- public:
- static_assert(std::is_same<pointer, value_type*>::value,
- "Allocators with custom pointer types are not supported");
- static_assert(std::is_same<const_pointer, const value_type*>::value,
- "Allocators with custom pointer types are not supported");
-
- class iterator {
- friend class raw_hash_set;
-
- public:
- using iterator_category = std::forward_iterator_tag;
- using value_type = typename raw_hash_set::value_type;
- using reference =
- absl::conditional_t<PolicyTraits::constant_iterators::value,
- const value_type&, value_type&>;
- using pointer = absl::remove_reference_t<reference>*;
- using difference_type = typename raw_hash_set::difference_type;
-
- iterator() {}
-
- // PRECONDITION: not an end() iterator.
- reference operator*() const {
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+ using PolicyTraits = hash_policy_traits<Policy>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+ using init_type = typename PolicyTraits::init_type;
+ using key_type = typename PolicyTraits::key_type;
+ // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+ // code fixes!
+ using slot_type = typename PolicyTraits::slot_type;
+ using allocator_type = Alloc;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using hasher = Hash;
+ using key_equal = Eq;
+ using policy_type = Policy;
+ using value_type = typename PolicyTraits::value_type;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::pointer;
+ using const_pointer = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+ // Give an early error when key_type is not hashable/eq.
+ auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+ auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+ using AllocTraits = absl::allocator_traits<allocator_type>;
+ using SlotAlloc = typename absl::allocator_traits<
+ allocator_type>::template rebind_alloc<slot_type>;
+ using SlotAllocTraits = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<slot_type>;
+
+ static_assert(std::is_lvalue_reference<reference>::value,
+ "Policy::element() must return a reference");
+
+ template <typename T>
+ struct SameAsElementReference
+ : std::is_same<typename std::remove_cv<
+ typename std::remove_reference<reference>::type>::type,
+ typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type> {};
+
+ // An enabler for insert(T&&): T must be convertible to init_type or be the
+ // same as [cv] value_type [ref].
+ // Note: we separate SameAsElementReference into its own type to avoid using
+ // reference unless we need to. MSVC doesn't seem to like it in some
+ // cases.
+ template <class T>
+ using RequiresInsertable = typename std::enable_if<
+ absl::disjunction<std::is_convertible<T, init_type>,
+ SameAsElementReference<T>>::value,
+ int>::type;
+
+ // RequiresNotInit is a workaround for gcc prior to 7.1.
+ // See https://godbolt.org/g/Y4xsUh.
+ template <class T>
+ using RequiresNotInit =
+ typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
+
+ template <class... Ts>
+ using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
+
+ public:
+ static_assert(std::is_same<pointer, value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+ static_assert(std::is_same<const_pointer, const value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+
+ class iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename raw_hash_set::value_type;
+ using reference =
+ absl::conditional_t<PolicyTraits::constant_iterators::value,
+ const value_type&, value_type&>;
+ using pointer = absl::remove_reference_t<reference>*;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ iterator() {}
+
+ // PRECONDITION: not an end() iterator.
+ reference operator*() const {
AssertIsFull(ctrl_);
- return PolicyTraits::element(slot_);
- }
-
- // PRECONDITION: not an end() iterator.
- pointer operator->() const { return &operator*(); }
-
- // PRECONDITION: not an end() iterator.
- iterator& operator++() {
+ return PolicyTraits::element(slot_);
+ }
+
+ // PRECONDITION: not an end() iterator.
+ pointer operator->() const { return &operator*(); }
+
+ // PRECONDITION: not an end() iterator.
+ iterator& operator++() {
AssertIsFull(ctrl_);
- ++ctrl_;
- ++slot_;
- skip_empty_or_deleted();
- return *this;
- }
- // PRECONDITION: not an end() iterator.
- iterator operator++(int) {
- auto tmp = *this;
- ++*this;
- return tmp;
- }
-
- friend bool operator==(const iterator& a, const iterator& b) {
+ ++ctrl_;
+ ++slot_;
+ skip_empty_or_deleted();
+ return *this;
+ }
+ // PRECONDITION: not an end() iterator.
+ iterator operator++(int) {
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ friend bool operator==(const iterator& a, const iterator& b) {
AssertIsValid(a.ctrl_);
AssertIsValid(b.ctrl_);
- return a.ctrl_ == b.ctrl_;
- }
- friend bool operator!=(const iterator& a, const iterator& b) {
- return !(a == b);
- }
-
- private:
+ return a.ctrl_ == b.ctrl_;
+ }
+ friend bool operator!=(const iterator& a, const iterator& b) {
+ return !(a == b);
+ }
+
+ private:
iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
// This assumption helps the compiler know that any non-end iterator is
// not equal to any end iterator.
ABSL_INTERNAL_ASSUME(ctrl != nullptr);
- }
-
- void skip_empty_or_deleted() {
- while (IsEmptyOrDeleted(*ctrl_)) {
- uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
- ctrl_ += shift;
- slot_ += shift;
- }
+ }
+
+ void skip_empty_or_deleted() {
+ while (IsEmptyOrDeleted(*ctrl_)) {
+ uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+ ctrl_ += shift;
+ slot_ += shift;
+ }
if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
- }
-
- ctrl_t* ctrl_ = nullptr;
- // To avoid uninitialized member warnings, put slot_ in an anonymous union.
- // The member is not initialized on singleton and end iterators.
- union {
- slot_type* slot_;
- };
- };
-
- class const_iterator {
- friend class raw_hash_set;
-
- public:
- using iterator_category = typename iterator::iterator_category;
- using value_type = typename raw_hash_set::value_type;
- using reference = typename raw_hash_set::const_reference;
- using pointer = typename raw_hash_set::const_pointer;
- using difference_type = typename raw_hash_set::difference_type;
-
- const_iterator() {}
- // Implicit construction from iterator.
- const_iterator(iterator i) : inner_(std::move(i)) {}
-
- reference operator*() const { return *inner_; }
- pointer operator->() const { return inner_.operator->(); }
-
- const_iterator& operator++() {
- ++inner_;
- return *this;
- }
- const_iterator operator++(int) { return inner_++; }
-
- friend bool operator==(const const_iterator& a, const const_iterator& b) {
- return a.inner_ == b.inner_;
- }
- friend bool operator!=(const const_iterator& a, const const_iterator& b) {
- return !(a == b);
- }
-
- private:
- const_iterator(const ctrl_t* ctrl, const slot_type* slot)
- : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
-
- iterator inner_;
- };
-
- using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
- using insert_return_type = InsertReturnType<iterator, node_type>;
-
- raw_hash_set() noexcept(
- std::is_nothrow_default_constructible<hasher>::value&&
- std::is_nothrow_default_constructible<key_equal>::value&&
- std::is_nothrow_default_constructible<allocator_type>::value) {}
-
- explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
- const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
+ }
+
+ ctrl_t* ctrl_ = nullptr;
+ // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+ // The member is not initialized on singleton and end iterators.
+ union {
+ slot_type* slot_;
+ };
+ };
+
+ class const_iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename raw_hash_set::value_type;
+ using reference = typename raw_hash_set::const_reference;
+ using pointer = typename raw_hash_set::const_pointer;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ const_iterator() {}
+ // Implicit construction from iterator.
+ const_iterator(iterator i) : inner_(std::move(i)) {}
+
+ reference operator*() const { return *inner_; }
+ pointer operator->() const { return inner_.operator->(); }
+
+ const_iterator& operator++() {
+ ++inner_;
+ return *this;
+ }
+ const_iterator operator++(int) { return inner_++; }
+
+ friend bool operator==(const const_iterator& a, const const_iterator& b) {
+ return a.inner_ == b.inner_;
+ }
+ friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ const_iterator(const ctrl_t* ctrl, const slot_type* slot)
+ : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+
+ iterator inner_;
+ };
+
+ using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
+
+ raw_hash_set() noexcept(
+ std::is_nothrow_default_constructible<hasher>::value&&
+ std::is_nothrow_default_constructible<key_equal>::value&&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+ explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+ const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
: ctrl_(EmptyGroup()),
settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
- if (bucket_count) {
- capacity_ = NormalizeCapacity(bucket_count);
- initialize_slots();
- }
- }
-
- raw_hash_set(size_t bucket_count, const hasher& hash,
- const allocator_type& alloc)
- : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
-
- raw_hash_set(size_t bucket_count, const allocator_type& alloc)
- : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
-
- explicit raw_hash_set(const allocator_type& alloc)
- : raw_hash_set(0, hasher(), key_equal(), alloc) {}
-
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
- const hasher& hash = hasher(), const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
+ if (bucket_count) {
+ capacity_ = NormalizeCapacity(bucket_count);
+ initialize_slots();
+ }
+ }
+
+ raw_hash_set(size_t bucket_count, const hasher& hash,
+ const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(size_t bucket_count, const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
+
+ explicit raw_hash_set(const allocator_type& alloc)
+ : raw_hash_set(0, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
: raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
hash, eq, alloc) {
- insert(first, last);
- }
-
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
- const hasher& hash, const allocator_type& alloc)
- : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
-
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
- const allocator_type& alloc)
- : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
-
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
- : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
-
- // Instead of accepting std::initializer_list<value_type> as the first
- // argument like std::unordered_set<value_type> does, we have two overloads
- // that accept std::initializer_list<T> and std::initializer_list<init_type>.
- // This is advantageous for performance.
- //
- // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
- // // copies the strings into the set.
- // std::unordered_set<std::string> s = {"abc", "def"};
- //
- // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
- // // copies the strings into the set.
- // absl::flat_hash_set<std::string> s = {"abc", "def"};
- //
- // The same trick is used in insert().
- //
- // The enabler is necessary to prevent this constructor from triggering where
- // the copy constructor is meant to be called.
- //
- // absl::flat_hash_set<int> a, b{a};
- //
- // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
- const hasher& hash = hasher(), const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
-
- raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
- const hasher& hash = hasher(), const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
-
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
- const hasher& hash, const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
-
- raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
- const hasher& hash, const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
-
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
- const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
-
- raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
- const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
-
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
- : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
-
- raw_hash_set(std::initializer_list<init_type> init,
- const allocator_type& alloc)
- : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
-
- raw_hash_set(const raw_hash_set& that)
- : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
- that.alloc_ref())) {}
-
- raw_hash_set(const raw_hash_set& that, const allocator_type& a)
- : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
- reserve(that.size());
- // Because the table is guaranteed to be empty, we can do something faster
- // than a full `insert`.
- for (const auto& v : that) {
- const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+ insert(first, last);
+ }
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
+ : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
+
+ // Instead of accepting std::initializer_list<value_type> as the first
+ // argument like std::unordered_set<value_type> does, we have two overloads
+ // that accept std::initializer_list<T> and std::initializer_list<init_type>.
+ // This is advantageous for performance.
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+ // // copies the strings into the set.
+ // std::unordered_set<std::string> s = {"abc", "def"};
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
+ // // copies the strings into the set.
+ // absl::flat_hash_set<std::string> s = {"abc", "def"};
+ //
+ // The same trick is used in insert().
+ //
+ // The enabler is necessary to prevent this constructor from triggering where
+ // the copy constructor is meant to be called.
+ //
+ // absl::flat_hash_set<int> a, b{a};
+ //
+ // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init,
+ const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
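+
+ // A minimal usage sketch of the two overload families above (illustrative
+ // only, not part of this header):
+ //
+ //   // Deduces std::initializer_list<const char*>: the T overload.
+ //   absl::flat_hash_set<std::string> s = {"abc", "def"};
+ //
+ //   // Nested braces cannot deduce T, so overload resolution falls back to
+ //   // the init_type overload, with init_type == std::pair<std::string, int>.
+ //   absl::flat_hash_map<std::string, int> m = {{"abc", 1}, {"def", 2}};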
+
+ raw_hash_set(const raw_hash_set& that)
+ : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
+ that.alloc_ref())) {}
+
+ raw_hash_set(const raw_hash_set& that, const allocator_type& a)
+ : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+ reserve(that.size());
+ // Because the table is guaranteed to be empty, we can do something faster
+ // than a full `insert`.
+ for (const auto& v : that) {
+ const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
auto target = find_first_non_full(ctrl_, hash, capacity_);
SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
sizeof(slot_type));
- emplace_at(target.offset, v);
+ emplace_at(target.offset, v);
infoz().RecordInsert(hash, target.probe_length);
- }
- size_ = that.size();
- growth_left() -= that.size();
- }
-
- raw_hash_set(raw_hash_set&& that) noexcept(
- std::is_nothrow_copy_constructible<hasher>::value&&
- std::is_nothrow_copy_constructible<key_equal>::value&&
- std::is_nothrow_copy_constructible<allocator_type>::value)
- : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
- slots_(absl::exchange(that.slots_, nullptr)),
- size_(absl::exchange(that.size_, 0)),
- capacity_(absl::exchange(that.capacity_, 0)),
- // Hash, equality and allocator are copied instead of moved because
- // `that` must be left valid. If Hash is std::function<Key>, moving it
- // would create a nullptr functor that cannot be called.
+ }
+ size_ = that.size();
+ growth_left() -= that.size();
+ }
+
+ raw_hash_set(raw_hash_set&& that) noexcept(
+ std::is_nothrow_copy_constructible<hasher>::value&&
+ std::is_nothrow_copy_constructible<key_equal>::value&&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
+ : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
+ slots_(absl::exchange(that.slots_, nullptr)),
+ size_(absl::exchange(that.size_, 0)),
+ capacity_(absl::exchange(that.capacity_, 0)),
+ // Hash, equality and allocator are copied instead of moved because
+ // `that` must be left valid. If Hash is std::function<Key>, moving it
+ // would create a nullptr functor that cannot be called.
settings_(absl::exchange(that.growth_left(), 0),
absl::exchange(that.infoz(), HashtablezInfoHandle()),
that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
-
- raw_hash_set(raw_hash_set&& that, const allocator_type& a)
- : ctrl_(EmptyGroup()),
- slots_(nullptr),
- size_(0),
- capacity_(0),
+
+ raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+ : ctrl_(EmptyGroup()),
+ slots_(nullptr),
+ size_(0),
+ capacity_(0),
settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
a) {
- if (a == that.alloc_ref()) {
- std::swap(ctrl_, that.ctrl_);
- std::swap(slots_, that.slots_);
- std::swap(size_, that.size_);
- std::swap(capacity_, that.capacity_);
- std::swap(growth_left(), that.growth_left());
+ if (a == that.alloc_ref()) {
+ std::swap(ctrl_, that.ctrl_);
+ std::swap(slots_, that.slots_);
+ std::swap(size_, that.size_);
+ std::swap(capacity_, that.capacity_);
+ std::swap(growth_left(), that.growth_left());
std::swap(infoz(), that.infoz());
- } else {
- reserve(that.size());
- // Note: this will copy elements of dense_set and unordered_set instead of
- // moving them. This can be fixed if it ever becomes an issue.
- for (auto& elem : that) insert(std::move(elem));
- }
- }
-
- raw_hash_set& operator=(const raw_hash_set& that) {
- raw_hash_set tmp(that,
- AllocTraits::propagate_on_container_copy_assignment::value
- ? that.alloc_ref()
- : alloc_ref());
- swap(tmp);
- return *this;
- }
-
- raw_hash_set& operator=(raw_hash_set&& that) noexcept(
- absl::allocator_traits<allocator_type>::is_always_equal::value&&
- std::is_nothrow_move_assignable<hasher>::value&&
- std::is_nothrow_move_assignable<key_equal>::value) {
- // TODO(sbenza): We should only use the operations from the noexcept clause
- // to make sure we actually adhere to that contract.
- return move_assign(
- std::move(that),
- typename AllocTraits::propagate_on_container_move_assignment());
- }
-
- ~raw_hash_set() { destroy_slots(); }
-
- iterator begin() {
- auto it = iterator_at(0);
- it.skip_empty_or_deleted();
- return it;
- }
+ } else {
+ reserve(that.size());
+ // Note: this will copy elements of dense_set and unordered_set instead of
+ // moving them. This can be fixed if it ever becomes an issue.
+ for (auto& elem : that) insert(std::move(elem));
+ }
+ }
+
+ raw_hash_set& operator=(const raw_hash_set& that) {
+ raw_hash_set tmp(that,
+ AllocTraits::propagate_on_container_copy_assignment::value
+ ? that.alloc_ref()
+ : alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+ absl::allocator_traits<allocator_type>::is_always_equal::value&&
+ std::is_nothrow_move_assignable<hasher>::value&&
+ std::is_nothrow_move_assignable<key_equal>::value) {
+ // TODO(sbenza): We should only use the operations from the noexcept clause
+ // to make sure we actually adhere to that contract.
+ return move_assign(
+ std::move(that),
+ typename AllocTraits::propagate_on_container_move_assignment());
+ }
+
+ ~raw_hash_set() { destroy_slots(); }
+
+ iterator begin() {
+ auto it = iterator_at(0);
+ it.skip_empty_or_deleted();
+ return it;
+ }
iterator end() { return {}; }
-
- const_iterator begin() const {
- return const_cast<raw_hash_set*>(this)->begin();
- }
+
+ const_iterator begin() const {
+ return const_cast<raw_hash_set*>(this)->begin();
+ }
const_iterator end() const { return {}; }
- const_iterator cbegin() const { return begin(); }
- const_iterator cend() const { return end(); }
-
- bool empty() const { return !size(); }
- size_t size() const { return size_; }
- size_t capacity() const { return capacity_; }
- size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
-
- ABSL_ATTRIBUTE_REINITIALIZES void clear() {
- // Iterating over this container is O(bucket_count()). When bucket_count()
- // is much greater than size(), iteration becomes prohibitively expensive.
- // For clear() it is more important to reuse the allocated array when the
- // container is small, because allocation is slow compared to destroying
- // the container's elements. So we pick the
- // largest bucket_count() threshold for which iteration is still fast and
- // past that we simply deallocate the array.
- if (capacity_ > 127) {
- destroy_slots();
+ const_iterator cbegin() const { return begin(); }
+ const_iterator cend() const { return end(); }
+
+ bool empty() const { return !size(); }
+ size_t size() const { return size_; }
+ size_t capacity() const { return capacity_; }
+ size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() {
+ // Iterating over this container is O(bucket_count()). When bucket_count()
+ // is much greater than size(), iteration becomes prohibitively expensive.
+ // For clear() it is more important to reuse the allocated array when the
+ // container is small, because allocation is slow compared to destroying
+ // the container's elements. So we pick the
+ // largest bucket_count() threshold for which iteration is still fast and
+ // past that we simply deallocate the array.
+ if (capacity_ > 127) {
+ destroy_slots();
infoz().RecordClearedReservation();
- } else if (capacity_) {
- for (size_t i = 0; i != capacity_; ++i) {
- if (IsFull(ctrl_[i])) {
- PolicyTraits::destroy(&alloc_ref(), slots_ + i);
- }
- }
- size_ = 0;
+ } else if (capacity_) {
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+ size_ = 0;
ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
- reset_growth_left();
- }
- assert(empty());
+ reset_growth_left();
+ }
+ assert(empty());
infoz().RecordStorageChanged(0, capacity_);
- }
-
- // This overload kicks in when the argument is an rvalue of insertable and
- // decomposable type other than init_type.
- //
- // flat_hash_map<std::string, int> m;
- // m.insert(std::make_pair("abc", 42));
- // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
- // bug.
- template <class T, RequiresInsertable<T> = 0,
- class T2 = T,
- typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
- T* = nullptr>
- std::pair<iterator, bool> insert(T&& value) {
- return emplace(std::forward<T>(value));
- }
-
- // This overload kicks in when the argument is a bitfield or an lvalue of
- // insertable and decomposable type.
- //
- // union { int n : 1; };
- // flat_hash_set<int> s;
- // s.insert(n);
- //
- // flat_hash_set<std::string> s;
- // const char* p = "hello";
- // s.insert(p);
- //
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
- template <
- class T, RequiresInsertable<T> = 0,
- typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- std::pair<iterator, bool> insert(const T& value) {
- return emplace(value);
- }
-
- // This overload kicks in when the argument is an rvalue of init_type. Its
- // purpose is to handle brace-init-list arguments.
- //
- // flat_hash_map<std::string, int> s;
- // s.insert({"abc", 42});
- std::pair<iterator, bool> insert(init_type&& value) {
- return emplace(std::move(value));
- }
-
- // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
- // bug.
- template <class T, RequiresInsertable<T> = 0, class T2 = T,
- typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
- T* = nullptr>
- iterator insert(const_iterator, T&& value) {
- return insert(std::forward<T>(value)).first;
- }
-
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
- template <
- class T, RequiresInsertable<T> = 0,
- typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- iterator insert(const_iterator, const T& value) {
- return insert(value).first;
- }
-
- iterator insert(const_iterator, init_type&& value) {
- return insert(std::move(value)).first;
- }
-
- template <class InputIt>
- void insert(InputIt first, InputIt last) {
+ }
+
+ // This overload kicks in when the argument is an rvalue of insertable and
+ // decomposable type other than init_type.
+ //
+ // flat_hash_map<std::string, int> m;
+ // m.insert(std::make_pair("abc", 42));
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0,
+ class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+ T* = nullptr>
+ std::pair<iterator, bool> insert(T&& value) {
+ return emplace(std::forward<T>(value));
+ }
+
+ // This overload kicks in when the argument is a bitfield or an lvalue of
+ // insertable and decomposable type.
+ //
+ // union { int n : 1; };
+ // flat_hash_set<int> s;
+ // s.insert(n);
+ //
+ // flat_hash_set<std::string> s;
+ // const char* p = "hello";
+ // s.insert(p);
+ //
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ std::pair<iterator, bool> insert(const T& value) {
+ return emplace(value);
+ }
+
+ // This overload kicks in when the argument is an rvalue of init_type. Its
+ // purpose is to handle brace-init-list arguments.
+ //
+ // flat_hash_map<std::string, int> s;
+ // s.insert({"abc", 42});
+ std::pair<iterator, bool> insert(init_type&& value) {
+ return emplace(std::move(value));
+ }
+
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+ T* = nullptr>
+ iterator insert(const_iterator, T&& value) {
+ return insert(std::forward<T>(value)).first;
+ }
+
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ iterator insert(const_iterator, const T& value) {
+ return insert(value).first;
+ }
+
+ iterator insert(const_iterator, init_type&& value) {
+ return insert(std::move(value)).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
for (; first != last; ++first) emplace(*first);
- }
-
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
- void insert(std::initializer_list<T> ilist) {
- insert(ilist.begin(), ilist.end());
- }
-
- void insert(std::initializer_list<init_type> ilist) {
- insert(ilist.begin(), ilist.end());
- }
-
- insert_return_type insert(node_type&& node) {
- if (!node) return {end(), false, node_type()};
- const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
- auto res = PolicyTraits::apply(
- InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
- elem);
- if (res.second) {
- CommonAccess::Reset(&node);
- return {res.first, true, node_type()};
- } else {
- return {res.first, false, std::move(node)};
- }
- }
-
- iterator insert(const_iterator, node_type&& node) {
+ }
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+ void insert(std::initializer_list<T> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ void insert(std::initializer_list<init_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ insert_return_type insert(node_type&& node) {
+ if (!node) return {end(), false, node_type()};
+ const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
+ auto res = PolicyTraits::apply(
+ InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+ elem);
+ if (res.second) {
+ CommonAccess::Reset(&node);
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+
+ iterator insert(const_iterator, node_type&& node) {
auto res = insert(std::move(node));
node = std::move(res.node);
return res.position;
- }
-
- // This overload kicks in if we can deduce the key from args. This enables us
- // to avoid constructing value_type if an entry with the same key already
- // exists.
- //
- // For example:
- //
- // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
- // // Creates no std::string copies and makes no heap allocations.
- // m.emplace("abc", "xyz");
- template <class... Args, typename std::enable_if<
- IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
- return PolicyTraits::apply(EmplaceDecomposable{*this},
- std::forward<Args>(args)...);
- }
-
- // This overload kicks in if we cannot deduce the key from args. It constructs
- // value_type unconditionally and then either moves it into the table or
- // destroys.
- template <class... Args, typename std::enable_if<
- !IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ }
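+
+ // A minimal sketch of the node-handle round trip (illustrative; `src` and
+ // `dst` are hypothetical flat_hash_set<std::string> instances):
+ //
+ //   auto node = src.extract("abc");  // empty node_type if "abc" is absent
+ //   if (node) {
+ //     auto res = dst.insert(std::move(node));
+ //     // If `dst` already held an equal key, res.inserted is false and
+ //     // res.node still owns the element.
+ //   }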
+
+ // This overload kicks in if we can deduce the key from args. This enables us
+ // to avoid constructing value_type if an entry with the same key already
+ // exists.
+ //
+ // For example:
+ //
+ // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
+ // // Creates no std::string copies and makes no heap allocations.
+ // m.emplace("abc", "xyz");
+ template <class... Args, typename std::enable_if<
+ IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return PolicyTraits::apply(EmplaceDecomposable{*this},
+ std::forward<Args>(args)...);
+ }
+
+ // This overload kicks in if we cannot deduce the key from args. It constructs
+ // value_type unconditionally and then either moves it into the table or
+ // destroys.
+ template <class... Args, typename std::enable_if<
+ !IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
- slot_type* slot = reinterpret_cast<slot_type*>(&raw);
-
- PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
- const auto& elem = PolicyTraits::element(slot);
- return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
- }
-
- template <class... Args>
- iterator emplace_hint(const_iterator, Args&&... args) {
- return emplace(std::forward<Args>(args)...).first;
- }
-
- // Extension API: support for lazy emplace.
- //
- // Looks up key in the table. If found, returns the iterator to the element.
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+ PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ const auto& elem = PolicyTraits::element(slot);
+ return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+ }
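+
+ // The overload above relies on the classic aligned-buffer idiom: raw
+ // storage with slot_type's alignment holds a temporary slot that is then
+ // either transferred into the table or destroyed. A generic sketch
+ // (illustrative):
+ //
+ //   alignas(T) unsigned char buf[sizeof(T)];
+ //   T* p = new (buf) T(args...);  // construct in place; no allocation
+ //   ...
+ //   p->~T();                      // destroy manually; nothing to free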
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator, Args&&... args) {
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ // Extension API: support for lazy emplace.
+ //
+ // Looks up key in the table. If found, returns the iterator to the element.
// Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
- //
+ //
// `f` must abide by several restrictions:
// - it MUST call `raw_hash_set::constructor` with arguments as if a
// `raw_hash_set::value_type` is constructed,
@@ -1246,172 +1246,172 @@ class raw_hash_set {
// - it MUST NOT erase the lazily emplaced element.
// Doing any of these is undefined behavior.
//
- // For example:
- //
- //   std::unordered_set<ArenaString> s;
- //   // Makes an ArenaString even if "abc" is already in the set.
- //   s.insert(ArenaString(&arena, "abc"));
- //
- //   flat_hash_set<ArenaString> s;
- //   // Makes an ArenaString only if "abc" is not already in the set.
- // s.lazy_emplace("abc", [&](const constructor& ctor) {
- // ctor(&arena, "abc");
- // });
- //
- // WARNING: This API is currently experimental. If there is a way to implement
- // the same thing with the rest of the API, prefer that.
- class constructor {
- friend class raw_hash_set;
-
- public:
- template <class... Args>
- void operator()(Args&&... args) const {
- assert(*slot_);
- PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
- *slot_ = nullptr;
- }
-
- private:
- constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
-
- allocator_type* alloc_;
- slot_type** slot_;
- };
-
- template <class K = key_type, class F>
- iterator lazy_emplace(const key_arg<K>& key, F&& f) {
- auto res = find_or_prepare_insert(key);
- if (res.second) {
- slot_type* slot = slots_ + res.first;
- std::forward<F>(f)(constructor(&alloc_ref(), &slot));
- assert(!slot);
- }
- return iterator_at(res.first);
- }
-
- // Extension API: support for heterogeneous keys.
- //
- // std::unordered_set<std::string> s;
- // // Turns "abc" into std::string.
- // s.erase("abc");
- //
- // flat_hash_set<std::string> s;
- // // Uses "abc" directly without copying it into std::string.
- // s.erase("abc");
- template <class K = key_type>
- size_type erase(const key_arg<K>& key) {
- auto it = find(key);
- if (it == end()) return 0;
- erase(it);
- return 1;
- }
-
- // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
- // this method returns void to reduce algorithmic complexity to O(1). The
- // iterator is invalidated, so any increment should be done before calling
- // erase. In order to erase while iterating across a map, use the following
- // idiom (which also works for standard containers):
- //
- // for (auto it = m.begin(), end = m.end(); it != end;) {
- // // `erase()` will invalidate `it`, so advance `it` first.
- // auto copy_it = it++;
- // if (<pred>) {
- // m.erase(copy_it);
- // }
- // }
- void erase(const_iterator cit) { erase(cit.inner_); }
-
- // This overload is necessary because otherwise erase<K>(const K&) would be
- // a better match if non-const iterator is passed as an argument.
- void erase(iterator it) {
+ // For example:
+ //
+ //   std::unordered_set<ArenaString> s;
+ //   // Makes an ArenaString even if "abc" is already in the set.
+ //   s.insert(ArenaString(&arena, "abc"));
+ //
+ //   flat_hash_set<ArenaString> s;
+ //   // Makes an ArenaString only if "abc" is not already in the set.
+ // s.lazy_emplace("abc", [&](const constructor& ctor) {
+ // ctor(&arena, "abc");
+ // });
+ //
+ // WARNING: This API is currently experimental. If there is a way to implement
+ // the same thing with the rest of the API, prefer that.
+ class constructor {
+ friend class raw_hash_set;
+
+ public:
+ template <class... Args>
+ void operator()(Args&&... args) const {
+ assert(*slot_);
+ PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+ *slot_ = nullptr;
+ }
+
+ private:
+ constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+ allocator_type* alloc_;
+ slot_type** slot_;
+ };
+
+ template <class K = key_type, class F>
+ iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ auto res = find_or_prepare_insert(key);
+ if (res.second) {
+ slot_type* slot = slots_ + res.first;
+ std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+ assert(!slot);
+ }
+ return iterator_at(res.first);
+ }
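+
+ // Another sketch (illustrative): construct the element exactly once, and
+ // only when the key is absent:
+ //
+ //   flat_hash_set<std::string> s;
+ //   s.lazy_emplace("key", [&](const constructor& ctor) { ctor("key"); });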
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<std::string> s;
+ // // Turns "abc" into std::string.
+ // s.erase("abc");
+ //
+ // flat_hash_set<std::string> s;
+ // // Uses "abc" directly without copying it into std::string.
+ // s.erase("abc");
+ template <class K = key_type>
+ size_type erase(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it == end()) return 0;
+ erase(it);
+ return 1;
+ }
+
+ // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
+ // this method returns void to reduce algorithmic complexity to O(1). The
+ // iterator is invalidated, so any increment should be done before calling
+ // erase. In order to erase while iterating across a map, use the following
+ // idiom (which also works for standard containers):
+ //
+ // for (auto it = m.begin(), end = m.end(); it != end;) {
+ // // `erase()` will invalidate `it`, so advance `it` first.
+ // auto copy_it = it++;
+ // if (<pred>) {
+ // m.erase(copy_it);
+ // }
+ // }
+ void erase(const_iterator cit) { erase(cit.inner_); }
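+
+ // A concrete instance of the idiom above (illustrative; erases the even
+ // values from a hypothetical flat_hash_set<int> `s`):
+ //
+ //   for (auto it = s.begin(), end = s.end(); it != end;) {
+ //     auto copy_it = it++;
+ //     if (*copy_it % 2 == 0) s.erase(copy_it);
+ //   }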
+
+ // This overload is necessary because otherwise erase<K>(const K&) would be
+ // a better match if non-const iterator is passed as an argument.
+ void erase(iterator it) {
AssertIsFull(it.ctrl_);
- PolicyTraits::destroy(&alloc_ref(), it.slot_);
- erase_meta_only(it);
- }
-
- iterator erase(const_iterator first, const_iterator last) {
- while (first != last) {
- erase(first++);
- }
- return last.inner_;
- }
-
- // Moves elements from `src` into `this`.
- // If the element already exists in `this`, it is left unmodified in `src`.
- template <typename H, typename E>
- void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
- assert(this != &src);
- for (auto it = src.begin(), e = src.end(); it != e;) {
- auto next = std::next(it);
- if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
- PolicyTraits::element(it.slot_))
- .second) {
- src.erase_meta_only(it);
- }
- it = next;
- }
- }
-
- template <typename H, typename E>
- void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
- merge(src);
- }
-
- node_type extract(const_iterator position) {
+ PolicyTraits::destroy(&alloc_ref(), it.slot_);
+ erase_meta_only(it);
+ }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ while (first != last) {
+ erase(first++);
+ }
+ return last.inner_;
+ }
+
+ // Moves elements from `src` into `this`.
+ // If the element already exists in `this`, it is left unmodified in `src`.
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
+ assert(this != &src);
+ for (auto it = src.begin(), e = src.end(); it != e;) {
+ auto next = std::next(it);
+ if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+ PolicyTraits::element(it.slot_))
+ .second) {
+ src.erase_meta_only(it);
+ }
+ it = next;
+ }
+ }
+
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+ merge(src);
+ }
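+
+ // Usage sketch (illustrative): keys already present in `dst` stay behind
+ // in `src`.
+ //
+ //   flat_hash_set<std::string> dst = {"a"};
+ //   flat_hash_set<std::string> src = {"a", "b"};
+ //   dst.merge(src);  // dst == {"a", "b"}, src == {"a"}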
+
+ node_type extract(const_iterator position) {
AssertIsFull(position.inner_.ctrl_);
- auto node =
- CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
- erase_meta_only(position);
- return node;
- }
-
- template <
- class K = key_type,
- typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
- node_type extract(const key_arg<K>& key) {
- auto it = find(key);
- return it == end() ? node_type() : extract(const_iterator{it});
- }
-
- void swap(raw_hash_set& that) noexcept(
- IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+ auto node =
+ CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
+ erase_meta_only(position);
+ return node;
+ }
+
+ template <
+ class K = key_type,
+ typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+ node_type extract(const key_arg<K>& key) {
+ auto it = find(key);
+ return it == end() ? node_type() : extract(const_iterator{it});
+ }
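+
+ // Sketch (illustrative): extracting lets the caller take ownership of an
+ // element without copying it:
+ //
+ //   flat_hash_set<std::string> s = {"abc"};
+ //   auto node = s.extract("abc");
+ //   if (node) {
+ //     std::string v = std::move(node.value());  // `s` no longer owns it
+ //   }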
+
+ void swap(raw_hash_set& that) noexcept(
+ IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
IsNoThrowSwappable<allocator_type>(
typename AllocTraits::propagate_on_container_swap{})) {
- using std::swap;
- swap(ctrl_, that.ctrl_);
- swap(slots_, that.slots_);
- swap(size_, that.size_);
- swap(capacity_, that.capacity_);
- swap(growth_left(), that.growth_left());
- swap(hash_ref(), that.hash_ref());
- swap(eq_ref(), that.eq_ref());
+ using std::swap;
+ swap(ctrl_, that.ctrl_);
+ swap(slots_, that.slots_);
+ swap(size_, that.size_);
+ swap(capacity_, that.capacity_);
+ swap(growth_left(), that.growth_left());
+ swap(hash_ref(), that.hash_ref());
+ swap(eq_ref(), that.eq_ref());
swap(infoz(), that.infoz());
SwapAlloc(alloc_ref(), that.alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
- }
-
- void rehash(size_t n) {
- if (n == 0 && capacity_ == 0) return;
- if (n == 0 && size_ == 0) {
- destroy_slots();
+ }
+
+ void rehash(size_t n) {
+ if (n == 0 && capacity_ == 0) return;
+ if (n == 0 && size_ == 0) {
+ destroy_slots();
infoz().RecordStorageChanged(0, 0);
infoz().RecordClearedReservation();
- return;
- }
+ return;
+ }
- // bitor is a faster way of doing `max` here. We will round up to the next
- // power-of-2-minus-1, so bitor is good enough.
- auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
- // n == 0 unconditionally rehashes as per the standard.
- if (n == 0 || m > capacity_) {
- resize(m);
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
+ auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
+ // n == 0 unconditionally rehashes as per the standard.
+ if (n == 0 || m > capacity_) {
+ resize(m);
// This is after resize, to ensure that we have completed the allocation
// and have potentially sampled the hashtable.
infoz().RecordReservation(n);
- }
- }
-
+ }
+ }
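+
+ // Sketch of the observable behavior (illustrative):
+ //
+ //   flat_hash_set<int> s;
+ //   s.rehash(100);  // grows to the next valid capacity >= 100
+ //   s.clear();
+ //   s.rehash(0);    // size() == 0, so the backing array is released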
+
void reserve(size_t n) {
if (n > size() + growth_left()) {
size_t m = GrowthToLowerboundCapacity(n);
@@ -1422,230 +1422,230 @@ class raw_hash_set {
infoz().RecordReservation(n);
}
}
-
- // Extension API: support for heterogeneous keys.
- //
- // std::unordered_set<std::string> s;
- // // Turns "abc" into std::string.
- // s.count("abc");
- //
- //   flat_hash_set<std::string> s;
- // // Uses "abc" directly without copying it into std::string.
- // s.count("abc");
- template <class K = key_type>
- size_t count(const key_arg<K>& key) const {
- return find(key) == end() ? 0 : 1;
- }
-
- // Issues CPU prefetch instructions for the memory needed to find or insert
- // a key. Like all lookup functions, this supports heterogeneous keys.
- //
- // NOTE: This is a very low level operation and should not be used without
- // specific benchmarks indicating its importance.
- template <class K = key_type>
- void prefetch(const key_arg<K>& key) const {
- (void)key;
-#if defined(__GNUC__)
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<std::string> s;
+ // // Turns "abc" into std::string.
+ // s.count("abc");
+ //
+ //   flat_hash_set<std::string> s;
+ // // Uses "abc" directly without copying it into std::string.
+ // s.count("abc");
+ template <class K = key_type>
+ size_t count(const key_arg<K>& key) const {
+ return find(key) == end() ? 0 : 1;
+ }
+
+ // Issues CPU prefetch instructions for the memory needed to find or insert
+ // a key. Like all lookup functions, this supports heterogeneous keys.
+ //
+ // NOTE: This is a very low level operation and should not be used without
+ // specific benchmarks indicating its importance.
+ template <class K = key_type>
+ void prefetch(const key_arg<K>& key) const {
+ (void)key;
+#if defined(__GNUC__)
prefetch_heap_block();
auto seq = probe(ctrl_, hash_ref()(key), capacity_);
- __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
- __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
-#endif // __GNUC__
- }
-
- // The API of find() has two extensions.
- //
- // 1. The hash can be passed by the user. It must be equal to the hash of the
- // key.
- //
- // 2. The type of the key argument doesn't have to be key_type. This is
- // so-called heterogeneous key support.
- template <class K = key_type>
- iterator find(const key_arg<K>& key, size_t hash) {
+ __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
+ __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
+#endif // __GNUC__
+ }
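+
+ // Sketch (illustrative): issue the prefetch, do unrelated work to hide the
+ // memory latency, then perform the lookup:
+ //
+ //   s.prefetch(key);
+ //   DoUnrelatedWork();  // hypothetical
+ //   auto it = s.find(key);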
+
+ // The API of find() has two extensions.
+ //
+ // 1. The hash can be passed by the user. It must be equal to the hash of the
+ // key.
+ //
+ // 2. The type of the key argument doesn't have to be key_type. This is
+ // so-called heterogeneous key support.
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key, size_t hash) {
auto seq = probe(ctrl_, hash, capacity_);
- while (true) {
- Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
- EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slots_ + seq.offset(i)))))
- return iterator_at(seq.offset(i));
- }
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
- seq.next();
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return iterator_at(seq.offset(i));
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ seq.next();
assert(seq.index() <= capacity_ && "full table!");
- }
- }
- template <class K = key_type>
- iterator find(const key_arg<K>& key) {
+ }
+ }
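+
+ // Sketch for extension (1) (illustrative): a caller-supplied hash must
+ // match what the table itself would compute:
+ //
+ //   const size_t h = s.hash_function()(key);
+ //   auto it = s.find(key, h);  // avoids re-hashing `key`
+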
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key) {
prefetch_heap_block();
- return find(key, hash_ref()(key));
- }
-
- template <class K = key_type>
- const_iterator find(const key_arg<K>& key, size_t hash) const {
- return const_cast<raw_hash_set*>(this)->find(key, hash);
- }
- template <class K = key_type>
- const_iterator find(const key_arg<K>& key) const {
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key, size_t hash) const {
+ return const_cast<raw_hash_set*>(this)->find(key, hash);
+ }
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key) const {
prefetch_heap_block();
- return find(key, hash_ref()(key));
- }
-
- template <class K = key_type>
- bool contains(const key_arg<K>& key) const {
- return find(key) != end();
- }
-
- template <class K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
- auto it = find(key);
- if (it != end()) return {it, std::next(it)};
- return {it, it};
- }
- template <class K = key_type>
- std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K>& key) const {
- auto it = find(key);
- if (it != end()) return {it, std::next(it)};
- return {it, it};
- }
-
- size_t bucket_count() const { return capacity_; }
- float load_factor() const {
- return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
- }
- float max_load_factor() const { return 1.0f; }
- void max_load_factor(float) {
- // Does nothing.
- }
-
- hasher hash_function() const { return hash_ref(); }
- key_equal key_eq() const { return eq_ref(); }
- allocator_type get_allocator() const { return alloc_ref(); }
-
- friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
- if (a.size() != b.size()) return false;
- const raw_hash_set* outer = &a;
- const raw_hash_set* inner = &b;
- if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
- for (const value_type& elem : *outer)
- if (!inner->has_element(elem)) return false;
- return true;
- }
-
- friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
- return !(a == b);
- }
-
- friend void swap(raw_hash_set& a,
- raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
- a.swap(b);
- }
-
- private:
- template <class Container, typename Enabler>
- friend struct absl::container_internal::hashtable_debug_internal::
- HashtableDebugAccess;
-
- struct FindElement {
- template <class K, class... Args>
- const_iterator operator()(const K& key, Args&&...) const {
- return s.find(key);
- }
- const raw_hash_set& s;
- };
-
- struct HashElement {
- template <class K, class... Args>
- size_t operator()(const K& key, Args&&...) const {
- return h(key);
- }
- const hasher& h;
- };
-
- template <class K1>
- struct EqualElement {
- template <class K2, class... Args>
- bool operator()(const K2& lhs, Args&&...) const {
- return eq(lhs, rhs);
- }
- const K1& rhs;
- const key_equal& eq;
- };
-
- struct EmplaceDecomposable {
- template <class K, class... Args>
- std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
- auto res = s.find_or_prepare_insert(key);
- if (res.second) {
- s.emplace_at(res.first, std::forward<Args>(args)...);
- }
- return {s.iterator_at(res.first), res.second};
- }
- raw_hash_set& s;
- };
-
- template <bool do_destroy>
- struct InsertSlot {
- template <class K, class... Args>
- std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
- auto res = s.find_or_prepare_insert(key);
- if (res.second) {
- PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
- } else if (do_destroy) {
- PolicyTraits::destroy(&s.alloc_ref(), &slot);
- }
- return {s.iterator_at(res.first), res.second};
- }
- raw_hash_set& s;
- // Constructed slot. Either moved into place or destroyed.
- slot_type&& slot;
- };
-
- // "erases" the object from the container, except that it doesn't actually
- // destroy the object. It only updates all the metadata of the class.
- // This can be used in conjunction with Policy::transfer to move the object to
- // another place.
- void erase_meta_only(const_iterator it) {
- assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
- --size_;
- const size_t index = it.inner_.ctrl_ - ctrl_;
- const size_t index_before = (index - Group::kWidth) & capacity_;
- const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
- const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
-
- // We count how many consecutive non-empty slots we have to the right and to the
- // left of `it`. If the sum is >= kWidth then there is at least one probe
- // window that might have seen a full group.
- bool was_never_full =
- empty_before && empty_after &&
- static_cast<size_t>(empty_after.TrailingZeros() +
- empty_before.LeadingZeros()) < Group::kWidth;
-
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ bool contains(const key_arg<K>& key) const {
+ return find(key) != end();
+ }
+
+ template <class K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+ template <class K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K>& key) const {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+
+ size_t bucket_count() const { return capacity_; }
+ float load_factor() const {
+ return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+ }
+ float max_load_factor() const { return 1.0f; }
+ void max_load_factor(float) {
+ // Does nothing.
+ }
+
+ hasher hash_function() const { return hash_ref(); }
+ key_equal key_eq() const { return eq_ref(); }
+ allocator_type get_allocator() const { return alloc_ref(); }
+
+ friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
+ if (a.size() != b.size()) return false;
+ const raw_hash_set* outer = &a;
+ const raw_hash_set* inner = &b;
+ if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
+ for (const value_type& elem : *outer)
+ if (!inner->has_element(elem)) return false;
+ return true;
+ }
+
+ friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
+ return !(a == b);
+ }
+
+ friend void swap(raw_hash_set& a,
+ raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+ }
+
+ private:
+ template <class Container, typename Enabler>
+ friend struct absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess;
+
+ struct FindElement {
+ template <class K, class... Args>
+ const_iterator operator()(const K& key, Args&&...) const {
+ return s.find(key);
+ }
+ const raw_hash_set& s;
+ };
+
+ struct HashElement {
+ template <class K, class... Args>
+ size_t operator()(const K& key, Args&&...) const {
+ return h(key);
+ }
+ const hasher& h;
+ };
+
+ template <class K1>
+ struct EqualElement {
+ template <class K2, class... Args>
+ bool operator()(const K2& lhs, Args&&...) const {
+ return eq(lhs, rhs);
+ }
+ const K1& rhs;
+ const key_equal& eq;
+ };
+
+ struct EmplaceDecomposable {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ s.emplace_at(res.first, std::forward<Args>(args)...);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ };
+
+ template <bool do_destroy>
+ struct InsertSlot {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+ } else if (do_destroy) {
+ PolicyTraits::destroy(&s.alloc_ref(), &slot);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ // Constructed slot. Either moved into place or destroyed.
+ slot_type&& slot;
+ };
+
+ // "erases" the object from the container, except that it doesn't actually
+ // destroy the object. It only updates all the metadata of the class.
+ // This can be used in conjunction with Policy::transfer to move the object to
+ // another place.
+ void erase_meta_only(const_iterator it) {
+ assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+ --size_;
+ const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index_before = (index - Group::kWidth) & capacity_;
+ const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+ // We count how many consecutive non-empty slots we have to the right and to the
+ // left of `it`. If the sum is >= kWidth then there is at least one probe
+ // window that might have seen a full group.
+ bool was_never_full =
+ empty_before && empty_after &&
+ static_cast<size_t>(empty_after.TrailingZeros() +
+ empty_before.LeadingZeros()) < Group::kWidth;
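+ // Worked example (sketch): with Group::kWidth == 16, if the run of
+ // consecutive non-empty slots extends 3 to the right of `it` and 5 to the
+ // left, then 3 + 5 < 16, so no 16-wide probe window was ever entirely
+ // full, and the slot can be re-marked kEmpty instead of kDeleted.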
+
SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
capacity_, ctrl_, slots_, sizeof(slot_type));
- growth_left() += was_never_full;
+ growth_left() += was_never_full;
infoz().RecordErase();
- }
-
- void initialize_slots() {
- assert(capacity_);
- // Folks with custom allocators often make unwarranted assumptions about the
- // behavior of their classes vis-a-vis trivial destructibility and what
- // calls they will or won't make. Avoid sampling for people with custom
- // allocators to get us out of this mess. This is not a hard guarantee but
- // a workaround while we plan the exact guarantee we want to provide.
- //
- // People are often sloppy with the exact type of their allocator (sometimes
- // it has an extra const or is missing the pair, but rebinds made it work
- // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
- // bound more carefully.
- if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
- slots_ == nullptr) {
+ }
+
+ void initialize_slots() {
+ assert(capacity_);
+ // Folks with custom allocators often make unwarranted assumptions about the
+ // behavior of their classes vis-a-vis trivial destructibility and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ //
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
+ // bound more carefully.
+ if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+ slots_ == nullptr) {
infoz() = Sample(sizeof(slot_type));
- }
-
+ }
+
char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
&alloc_ref(),
AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
@@ -1653,133 +1653,133 @@ class raw_hash_set {
slots_ = reinterpret_cast<slot_type*>(
mem + SlotOffset(capacity_, alignof(slot_type)));
ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
- reset_growth_left();
+ reset_growth_left();
infoz().RecordStorageChanged(size_, capacity_);
- }
-
- void destroy_slots() {
- if (!capacity_) return;
- for (size_t i = 0; i != capacity_; ++i) {
- if (IsFull(ctrl_[i])) {
- PolicyTraits::destroy(&alloc_ref(), slots_ + i);
- }
- }
-
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+ }
+
+ void destroy_slots() {
+ if (!capacity_) return;
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
Deallocate<alignof(slot_type)>(
&alloc_ref(), ctrl_,
AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
- ctrl_ = EmptyGroup();
- slots_ = nullptr;
- size_ = 0;
- capacity_ = 0;
- growth_left() = 0;
- }
-
- void resize(size_t new_capacity) {
- assert(IsValidCapacity(new_capacity));
- auto* old_ctrl = ctrl_;
- auto* old_slots = slots_;
- const size_t old_capacity = capacity_;
- capacity_ = new_capacity;
- initialize_slots();
-
- size_t total_probe_length = 0;
- for (size_t i = 0; i != old_capacity; ++i) {
- if (IsFull(old_ctrl[i])) {
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(old_slots + i));
+ ctrl_ = EmptyGroup();
+ slots_ = nullptr;
+ size_ = 0;
+ capacity_ = 0;
+ growth_left() = 0;
+ }
+
+ void resize(size_t new_capacity) {
+ assert(IsValidCapacity(new_capacity));
+ auto* old_ctrl = ctrl_;
+ auto* old_slots = slots_;
+ const size_t old_capacity = capacity_;
+ capacity_ = new_capacity;
+ initialize_slots();
+
+ size_t total_probe_length = 0;
+ for (size_t i = 0; i != old_capacity; ++i) {
+ if (IsFull(old_ctrl[i])) {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(old_slots + i));
auto target = find_first_non_full(ctrl_, hash, capacity_);
- size_t new_i = target.offset;
- total_probe_length += target.probe_length;
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
- }
- }
- if (old_capacity) {
- SanitizerUnpoisonMemoryRegion(old_slots,
- sizeof(slot_type) * old_capacity);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+ }
+ }
+ if (old_capacity) {
+ SanitizerUnpoisonMemoryRegion(old_slots,
+ sizeof(slot_type) * old_capacity);
Deallocate<alignof(slot_type)>(
&alloc_ref(), old_ctrl,
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
- }
+ }
infoz().RecordRehash(total_probe_length);
- }
-
- void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
- assert(IsValidCapacity(capacity_));
+ }
+
+ void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+ assert(IsValidCapacity(capacity_));
assert(!is_small(capacity_));
- // Algorithm:
- // - mark all DELETED slots as EMPTY
- // - mark all FULL slots as DELETED
- // - for each slot marked as DELETED
- // hash = Hash(element)
- // target = find_first_non_full(hash)
- // if target is in the same group
- // mark slot as FULL
- // else if target is EMPTY
- // transfer element to target
- // mark slot as EMPTY
- // mark target as FULL
- // else if target is DELETED
- // swap current element with target element
- // mark target as FULL
- // repeat procedure for current slot with moved from element (target)
- ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+ // Algorithm:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ // - for each slot marked as DELETED
+ // hash = Hash(element)
+ // target = find_first_non_full(hash)
+ // if target is in the same group
+ // mark slot as FULL
+ // else if target is EMPTY
+ // transfer element to target
+ // mark slot as EMPTY
+ // mark target as FULL
+ // else if target is DELETED
+ // swap current element with target element
+ // mark target as FULL
+ // repeat procedure for current slot with moved from element (target)
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
- size_t total_probe_length = 0;
- slot_type* slot = reinterpret_cast<slot_type*>(&raw);
- for (size_t i = 0; i != capacity_; ++i) {
- if (!IsDeleted(ctrl_[i])) continue;
+ size_t total_probe_length = 0;
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (!IsDeleted(ctrl_[i])) continue;
const size_t hash = PolicyTraits::apply(
HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
const size_t new_i = target.offset;
- total_probe_length += target.probe_length;
-
- // Check whether the old and new i fall within the same group with respect
- // to the hash. If they do, we don't need to move the object, as it already
- // sits in the best probe position we can achieve.
+ total_probe_length += target.probe_length;
+
+ // Check whether the old and new i fall within the same group with respect
+ // to the hash. If they do, we don't need to move the object, as it already
+ // sits in the best probe position we can achieve.
const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
const auto probe_index = [probe_offset, this](size_t pos) {
return ((pos - probe_offset) & capacity_) / Group::kWidth;
- };
-
- // Element doesn't move.
- if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+ };
+
+ // Element doesn't move.
+ if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- continue;
- }
- if (IsEmpty(ctrl_[new_i])) {
- // Transfer element to the empty spot.
+ continue;
+ }
+ if (IsEmpty(ctrl_[new_i])) {
+ // Transfer element to the empty spot.
// SetCtrl poisons/unpoisons the slots so we have to call it at the
- // right time.
+ // right time.
SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
- } else {
- assert(IsDeleted(ctrl_[new_i]));
+ } else {
+ assert(IsDeleted(ctrl_[new_i]));
SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- // Until we are done rehashing, DELETED marks previously FULL slots.
- // Swap i and new_i elements.
- PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
- PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
- --i; // repeat
- }
- }
- reset_growth_left();
+ // Until we are done rehashing, DELETED marks previously FULL slots.
+ // Swap i and new_i elements.
+ PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+ --i; // repeat
+ }
+ }
+ reset_growth_left();
infoz().RecordRehash(total_probe_length);
- }
-
- void rehash_and_grow_if_necessary() {
- if (capacity_ == 0) {
- resize(1);
+ }
+
+ void rehash_and_grow_if_necessary() {
+ if (capacity_ == 0) {
+ resize(1);
} else if (capacity_ > Group::kWidth &&
// Do these calculations in 64-bit to avoid overflow.
size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
- // Squash DELETED without growing if there is enough capacity.
+ // Squash DELETED without growing if there is enough capacity.
//
// Rehash in place if the current size is <= 25/32 of capacity_.
// Rationale for such a high factor: 1) drop_deletes_without_resize() is
@@ -1820,108 +1820,108 @@ class raw_hash_set {
// 762 | 149836 0.37 13 | 148559 0.74 190
// 807 | 149736 0.39 14 | 151107 0.39 14
// 852 | 150204 0.42 15 | 151019 0.42 15
- drop_deletes_without_resize();
- } else {
- // Otherwise grow the container.
- resize(capacity_ * 2 + 1);
- }
- }
-
- bool has_element(const value_type& elem) const {
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+ drop_deletes_without_resize();
+ } else {
+ // Otherwise grow the container.
+ resize(capacity_ * 2 + 1);
+ }
+ }
+
+ bool has_element(const value_type& elem) const {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
auto seq = probe(ctrl_, hash, capacity_);
- while (true) {
- Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
- elem))
- return true;
- }
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
- seq.next();
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+ elem))
+ return true;
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ seq.next();
assert(seq.index() <= capacity_ && "full table!");
- }
- return false;
- }
-
- // TODO(alkis): Optimize this assuming *this and that don't overlap.
- raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
- raw_hash_set tmp(std::move(that));
- swap(tmp);
- return *this;
- }
- raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
- raw_hash_set tmp(std::move(that), alloc_ref());
- swap(tmp);
- return *this;
- }
-
- protected:
- template <class K>
- std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ }
+ return false;
+ }
+
+ // TODO(alkis): Optimize this assuming *this and that don't overlap.
+ raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+ raw_hash_set tmp(std::move(that));
+ swap(tmp);
+ return *this;
+ }
+ raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+ raw_hash_set tmp(std::move(that), alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ protected:
+ template <class K>
+ std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
- auto hash = hash_ref()(key);
+ auto hash = hash_ref()(key);
auto seq = probe(ctrl_, hash, capacity_);
- while (true) {
- Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
- EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slots_ + seq.offset(i)))))
- return {seq.offset(i), false};
- }
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
- seq.next();
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return {seq.offset(i), false};
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ seq.next();
assert(seq.index() <= capacity_ && "full table!");
- }
- return {prepare_insert(hash), true};
- }
-
- size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+ }
+ return {prepare_insert(hash), true};
+ }
+
+ size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(ctrl_, hash, capacity_);
- if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
- !IsDeleted(ctrl_[target.offset]))) {
- rehash_and_grow_if_necessary();
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(ctrl_[target.offset]))) {
+ rehash_and_grow_if_necessary();
target = find_first_non_full(ctrl_, hash, capacity_);
- }
- ++size_;
- growth_left() -= IsEmpty(ctrl_[target.offset]);
+ }
+ ++size_;
+ growth_left() -= IsEmpty(ctrl_[target.offset]);
SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
sizeof(slot_type));
infoz().RecordInsert(hash, target.probe_length);
- return target.offset;
- }
-
-  // Constructs the value in the space pointed to by the iterator. This only works
- // after an unsuccessful find_or_prepare_insert() and before any other
- // modifications happen in the raw_hash_set.
- //
- // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
- // k is the key decomposed from `forward<Args>(args)...`, and the bool
- // returned by find_or_prepare_insert(k) was true.
- // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
- template <class... Args>
- void emplace_at(size_t i, Args&&... args) {
- PolicyTraits::construct(&alloc_ref(), slots_ + i,
- std::forward<Args>(args)...);
-
- assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
- iterator_at(i) &&
- "constructed value does not match the lookup key");
- }
-
- iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
- const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
-
- private:
- friend struct RawHashSetTestOnlyAccess;
-
- void reset_growth_left() {
- growth_left() = CapacityToGrowth(capacity()) - size_;
- }
-
+ return target.offset;
+ }
+
+  // Constructs the value in the space pointed to by the iterator. This only works
+ // after an unsuccessful find_or_prepare_insert() and before any other
+ // modifications happen in the raw_hash_set.
+ //
+ // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+ // k is the key decomposed from `forward<Args>(args)...`, and the bool
+ // returned by find_or_prepare_insert(k) was true.
+ // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+ template <class... Args>
+ void emplace_at(size_t i, Args&&... args) {
+ PolicyTraits::construct(&alloc_ref(), slots_ + i,
+ std::forward<Args>(args)...);
+
+ assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+ iterator_at(i) &&
+ "constructed value does not match the lookup key");
+ }
+
+ iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+ const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+ friend struct RawHashSetTestOnlyAccess;
+
+ void reset_growth_left() {
+ growth_left() = CapacityToGrowth(capacity()) - size_;
+ }
+
size_t& growth_left() { return settings_.template get<0>(); }
-
+
void prefetch_heap_block() const {
// Prefetch the heap-allocated memory region to resolve potential TLB
// misses. This is intended to overlap with execution of calculating the
@@ -1929,33 +1929,33 @@ class raw_hash_set {
#if defined(__GNUC__)
__builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
#endif // __GNUC__
- }
-
+ }
+
HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
-
+
hasher& hash_ref() { return settings_.template get<2>(); }
const hasher& hash_ref() const { return settings_.template get<2>(); }
key_equal& eq_ref() { return settings_.template get<3>(); }
const key_equal& eq_ref() const { return settings_.template get<3>(); }
allocator_type& alloc_ref() { return settings_.template get<4>(); }
- const allocator_type& alloc_ref() const {
+ const allocator_type& alloc_ref() const {
return settings_.template get<4>();
- }
-
- // TODO(alkis): Investigate removing some of these fields:
- // - ctrl/slots can be derived from each other
- // - size can be moved into the slot array
+ }
+
+ // TODO(alkis): Investigate removing some of these fields:
+ // - ctrl/slots can be derived from each other
+ // - size can be moved into the slot array
ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
absl::container_internal::CompressedTuple<size_t /* growth_left */,
HashtablezInfoHandle, hasher,
- key_equal, allocator_type>
+ key_equal, allocator_type>
settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
allocator_type{}};
-};
-
+};
+
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
@@ -1968,67 +1968,67 @@ void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
}
}
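
The EraseIf helper above is the backend for the public absl::erase_if overloads declared in the container headers (flat_hash_map.h and friends). A minimal usage sketch, assuming Abseil is available to compile and link against; the map contents are illustrative:

#include <string>

#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<std::string, int> m = {{"a", 1}, {"b", 2}, {"c", 3}};
  // Erase entries with odd values. The helper advances its iterator past
  // each element before (possibly) erasing it, so iteration stays valid.
  absl::erase_if(m, [](const std::pair<const std::string, int>& kv) {
    return kv.second % 2 == 1;
  });
  return m.size() == 1 ? 0 : 1;  // only {"b", 2} survives
}
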
-namespace hashtable_debug_internal {
-template <typename Set>
-struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
- using Traits = typename Set::PolicyTraits;
- using Slot = typename Traits::slot_type;
-
- static size_t GetNumProbes(const Set& set,
- const typename Set::key_type& key) {
- size_t num_probes = 0;
- size_t hash = set.hash_ref()(key);
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+ using Traits = typename Set::PolicyTraits;
+ using Slot = typename Traits::slot_type;
+
+ static size_t GetNumProbes(const Set& set,
+ const typename Set::key_type& key) {
+ size_t num_probes = 0;
+ size_t hash = set.hash_ref()(key);
auto seq = probe(set.ctrl_, hash, set.capacity_);
- while (true) {
- container_internal::Group g{set.ctrl_ + seq.offset()};
- for (int i : g.Match(container_internal::H2(hash))) {
- if (Traits::apply(
- typename Set::template EqualElement<typename Set::key_type>{
- key, set.eq_ref()},
- Traits::element(set.slots_ + seq.offset(i))))
- return num_probes;
- ++num_probes;
- }
- if (g.MatchEmpty()) return num_probes;
- seq.next();
- ++num_probes;
- }
- }
-
- static size_t AllocatedByteSize(const Set& c) {
- size_t capacity = c.capacity_;
- if (capacity == 0) return 0;
+ while (true) {
+ container_internal::Group g{set.ctrl_ + seq.offset()};
+ for (int i : g.Match(container_internal::H2(hash))) {
+ if (Traits::apply(
+ typename Set::template EqualElement<typename Set::key_type>{
+ key, set.eq_ref()},
+ Traits::element(set.slots_ + seq.offset(i))))
+ return num_probes;
+ ++num_probes;
+ }
+ if (g.MatchEmpty()) return num_probes;
+ seq.next();
+ ++num_probes;
+ }
+ }
+
+ static size_t AllocatedByteSize(const Set& c) {
+ size_t capacity = c.capacity_;
+ if (capacity == 0) return 0;
size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
-
- size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
- if (per_slot != ~size_t{}) {
- m += per_slot * c.size();
- } else {
- for (size_t i = 0; i != capacity; ++i) {
- if (container_internal::IsFull(c.ctrl_[i])) {
- m += Traits::space_used(c.slots_ + i);
- }
- }
- }
- return m;
- }
-
- static size_t LowerBoundAllocatedByteSize(size_t size) {
- size_t capacity = GrowthToLowerboundCapacity(size);
- if (capacity == 0) return 0;
+
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * c.size();
+ } else {
+ for (size_t i = 0; i != capacity; ++i) {
+ if (container_internal::IsFull(c.ctrl_[i])) {
+ m += Traits::space_used(c.slots_ + i);
+ }
+ }
+ }
+ return m;
+ }
+
+ static size_t LowerBoundAllocatedByteSize(size_t size) {
+ size_t capacity = GrowthToLowerboundCapacity(size);
+ if (capacity == 0) return 0;
size_t m =
AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
- size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
- if (per_slot != ~size_t{}) {
- m += per_slot * size;
- }
- return m;
- }
-};
-
-} // namespace hashtable_debug_internal
-} // namespace container_internal
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * size;
+ }
+ return m;
+ }
+};
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
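
The in-place rehash path above is observable through the public containers: steady-state erase/insert churn fills the table with kDeleted tombstones, but while the size stays at or below 25/32 of capacity, rehash_and_grow_if_necessary() squashes the tombstones in place instead of doubling. A small sketch, assuming Abseil is linked; exact capacities are implementation details, so the program only reports bucket_count() rather than asserting a particular value:

#include <cstdio>

#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> s;
  for (int i = 0; i < 1000; ++i) s.insert(i);
  const auto initial = s.bucket_count();  // bucket_count() == capacity_
  for (int round = 0; round < 50; ++round) {
    for (int i = 0; i < 1000; ++i) s.erase(i);   // typically leaves tombstones
    for (int i = 0; i < 1000; ++i) s.insert(i);  // burns down growth_left
  }
  // Expected to stabilize rather than double on every round of churn.
  std::printf("bucket_count: %zu -> %zu\n", initial, s.bucket_count());
  return 0;
}
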
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/test_instance_tracker.h b/contrib/restricted/abseil-cpp/absl/container/internal/test_instance_tracker.h
index 5ff6fd714e..dec63022fe 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/test_instance_tracker.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/test_instance_tracker.h
@@ -1,274 +1,274 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
-#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
-
-#include <cstdlib>
-#include <ostream>
-
-#include "absl/types/compare.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+
+#include <cstdlib>
+#include <ostream>
+
+#include "absl/types/compare.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace test_internal {
-
-// A type that counts the number of occurrences of the type, the live occurrences of
-// the type, as well as the number of copies, moves, swaps, and comparisons that
-// have occurred on the type. This is used as a base class for the copyable,
-// copyable+movable, and movable types below that are used in actual tests. Use
-// InstanceTracker in tests to track the number of instances.
-class BaseCountedInstance {
- public:
- explicit BaseCountedInstance(int x) : value_(x) {
- ++num_instances_;
- ++num_live_instances_;
- }
- BaseCountedInstance(const BaseCountedInstance& x)
- : value_(x.value_), is_live_(x.is_live_) {
- ++num_instances_;
- if (is_live_) ++num_live_instances_;
- ++num_copies_;
- }
- BaseCountedInstance(BaseCountedInstance&& x)
- : value_(x.value_), is_live_(x.is_live_) {
- x.is_live_ = false;
- ++num_instances_;
- ++num_moves_;
- }
- ~BaseCountedInstance() {
- --num_instances_;
- if (is_live_) --num_live_instances_;
- }
-
- BaseCountedInstance& operator=(const BaseCountedInstance& x) {
- value_ = x.value_;
- if (is_live_) --num_live_instances_;
- is_live_ = x.is_live_;
- if (is_live_) ++num_live_instances_;
- ++num_copies_;
- return *this;
- }
- BaseCountedInstance& operator=(BaseCountedInstance&& x) {
- value_ = x.value_;
- if (is_live_) --num_live_instances_;
- is_live_ = x.is_live_;
- x.is_live_ = false;
- ++num_moves_;
- return *this;
- }
-
- bool operator==(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ == x.value_;
- }
-
- bool operator!=(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ != x.value_;
- }
-
- bool operator<(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ < x.value_;
- }
-
- bool operator>(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ > x.value_;
- }
-
- bool operator<=(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ <= x.value_;
- }
-
- bool operator>=(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ >= x.value_;
- }
-
- absl::weak_ordering compare(const BaseCountedInstance& x) const {
- ++num_comparisons_;
- return value_ < x.value_
- ? absl::weak_ordering::less
- : value_ == x.value_ ? absl::weak_ordering::equivalent
- : absl::weak_ordering::greater;
- }
-
- int value() const {
- if (!is_live_) std::abort();
- return value_;
- }
-
- friend std::ostream& operator<<(std::ostream& o,
- const BaseCountedInstance& v) {
- return o << "[value:" << v.value() << "]";
- }
-
- // Implementation of efficient swap() that counts swaps.
- static void SwapImpl(
- BaseCountedInstance& lhs, // NOLINT(runtime/references)
- BaseCountedInstance& rhs) { // NOLINT(runtime/references)
- using std::swap;
- swap(lhs.value_, rhs.value_);
- swap(lhs.is_live_, rhs.is_live_);
- ++BaseCountedInstance::num_swaps_;
- }
-
- private:
- friend class InstanceTracker;
-
- int value_;
-
-  // Indicates if the value is live, i.e., it hasn't been moved away from.
- bool is_live_ = true;
-
- // Number of instances.
- static int num_instances_;
-
-  // Number of live instances (those that have not been moved away from).
- static int num_live_instances_;
-
- // Number of times that BaseCountedInstance objects were moved.
- static int num_moves_;
-
- // Number of times that BaseCountedInstance objects were copied.
- static int num_copies_;
-
- // Number of times that BaseCountedInstance objects were swapped.
- static int num_swaps_;
-
- // Number of times that BaseCountedInstance objects were compared.
- static int num_comparisons_;
-};
-
-// Helper to track the BaseCountedInstance instance counters. Expects that the
-// number of instances and live_instances are the same when it is constructed
-// and when it is destructed.
-class InstanceTracker {
- public:
- InstanceTracker()
- : start_instances_(BaseCountedInstance::num_instances_),
- start_live_instances_(BaseCountedInstance::num_live_instances_) {
- ResetCopiesMovesSwaps();
- }
- ~InstanceTracker() {
- if (instances() != 0) std::abort();
- if (live_instances() != 0) std::abort();
- }
-
-  // Returns the number of BaseCountedInstance instances, both those holding
-  // valid values and those moved away from, relative to when the
-  // InstanceTracker was constructed.
- int instances() const {
- return BaseCountedInstance::num_instances_ - start_instances_;
- }
-
- // Returns the number of live BaseCountedInstance instances compared to when
-  // the InstanceTracker was constructed.
- int live_instances() const {
- return BaseCountedInstance::num_live_instances_ - start_live_instances_;
- }
-
- // Returns the number of moves on BaseCountedInstance objects since
- // construction or since the last call to ResetCopiesMovesSwaps().
- int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }
-
- // Returns the number of copies on BaseCountedInstance objects since
- // construction or the last call to ResetCopiesMovesSwaps().
- int copies() const {
- return BaseCountedInstance::num_copies_ - start_copies_;
- }
-
- // Returns the number of swaps on BaseCountedInstance objects since
- // construction or the last call to ResetCopiesMovesSwaps().
- int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
-
- // Returns the number of comparisons on BaseCountedInstance objects since
- // construction or the last call to ResetCopiesMovesSwaps().
- int comparisons() const {
- return BaseCountedInstance::num_comparisons_ - start_comparisons_;
- }
-
- // Resets the base values for moves, copies, comparisons, and swaps to the
- // current values, so that subsequent Get*() calls for moves, copies,
- // comparisons, and swaps will compare to the situation at the point of this
- // call.
- void ResetCopiesMovesSwaps() {
- start_moves_ = BaseCountedInstance::num_moves_;
- start_copies_ = BaseCountedInstance::num_copies_;
- start_swaps_ = BaseCountedInstance::num_swaps_;
- start_comparisons_ = BaseCountedInstance::num_comparisons_;
- }
-
- private:
- int start_instances_;
- int start_live_instances_;
- int start_moves_;
- int start_copies_;
- int start_swaps_;
- int start_comparisons_;
-};
-
-// Copyable, not movable.
-class CopyableOnlyInstance : public BaseCountedInstance {
- public:
- explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
- CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
- CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
-
- friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
- BaseCountedInstance::SwapImpl(lhs, rhs);
- }
-
- static bool supports_move() { return false; }
-};
-
-// Copyable and movable.
-class CopyableMovableInstance : public BaseCountedInstance {
- public:
- explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
- CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
- CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
- CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
- default;
- CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
-
- friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
- BaseCountedInstance::SwapImpl(lhs, rhs);
- }
-
- static bool supports_move() { return true; }
-};
-
-// Only movable, not default-constructible.
-class MovableOnlyInstance : public BaseCountedInstance {
- public:
- explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
- MovableOnlyInstance(MovableOnlyInstance&& other) = default;
- MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
-
- friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
- BaseCountedInstance::SwapImpl(lhs, rhs);
- }
-
- static bool supports_move() { return true; }
-};
-
-} // namespace test_internal
+namespace test_internal {
+
+// A type that counts the number of occurrences of the type, the live occurrences of
+// the type, as well as the number of copies, moves, swaps, and comparisons that
+// have occurred on the type. This is used as a base class for the copyable,
+// copyable+movable, and movable types below that are used in actual tests. Use
+// InstanceTracker in tests to track the number of instances.
+class BaseCountedInstance {
+ public:
+ explicit BaseCountedInstance(int x) : value_(x) {
+ ++num_instances_;
+ ++num_live_instances_;
+ }
+ BaseCountedInstance(const BaseCountedInstance& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ ++num_instances_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ }
+ BaseCountedInstance(BaseCountedInstance&& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ x.is_live_ = false;
+ ++num_instances_;
+ ++num_moves_;
+ }
+ ~BaseCountedInstance() {
+ --num_instances_;
+ if (is_live_) --num_live_instances_;
+ }
+
+ BaseCountedInstance& operator=(const BaseCountedInstance& x) {
+ value_ = x.value_;
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ return *this;
+ }
+ BaseCountedInstance& operator=(BaseCountedInstance&& x) {
+ value_ = x.value_;
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ x.is_live_ = false;
+ ++num_moves_;
+ return *this;
+ }
+
+ bool operator==(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ == x.value_;
+ }
+
+ bool operator!=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ != x.value_;
+ }
+
+ bool operator<(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_;
+ }
+
+ bool operator>(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ > x.value_;
+ }
+
+ bool operator<=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ <= x.value_;
+ }
+
+ bool operator>=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ >= x.value_;
+ }
+
+ absl::weak_ordering compare(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_
+ ? absl::weak_ordering::less
+ : value_ == x.value_ ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+
+ int value() const {
+ if (!is_live_) std::abort();
+ return value_;
+ }
+
+ friend std::ostream& operator<<(std::ostream& o,
+ const BaseCountedInstance& v) {
+ return o << "[value:" << v.value() << "]";
+ }
+
+ // Implementation of efficient swap() that counts swaps.
+ static void SwapImpl(
+ BaseCountedInstance& lhs, // NOLINT(runtime/references)
+ BaseCountedInstance& rhs) { // NOLINT(runtime/references)
+ using std::swap;
+ swap(lhs.value_, rhs.value_);
+ swap(lhs.is_live_, rhs.is_live_);
+ ++BaseCountedInstance::num_swaps_;
+ }
+
+ private:
+ friend class InstanceTracker;
+
+ int value_;
+
+  // Indicates if the value is live, i.e., it hasn't been moved away from.
+ bool is_live_ = true;
+
+ // Number of instances.
+ static int num_instances_;
+
+  // Number of live instances (those that have not been moved away from).
+ static int num_live_instances_;
+
+ // Number of times that BaseCountedInstance objects were moved.
+ static int num_moves_;
+
+ // Number of times that BaseCountedInstance objects were copied.
+ static int num_copies_;
+
+ // Number of times that BaseCountedInstance objects were swapped.
+ static int num_swaps_;
+
+ // Number of times that BaseCountedInstance objects were compared.
+ static int num_comparisons_;
+};
+
+// Helper to track the BaseCountedInstance instance counters. Expects that the
+// number of instances and live_instances are the same when it is constructed
+// and when it is destructed.
+class InstanceTracker {
+ public:
+ InstanceTracker()
+ : start_instances_(BaseCountedInstance::num_instances_),
+ start_live_instances_(BaseCountedInstance::num_live_instances_) {
+ ResetCopiesMovesSwaps();
+ }
+ ~InstanceTracker() {
+ if (instances() != 0) std::abort();
+ if (live_instances() != 0) std::abort();
+ }
+
+  // Returns the number of BaseCountedInstance instances, both those holding
+  // valid values and those moved away from, relative to when the
+  // InstanceTracker was constructed.
+ int instances() const {
+ return BaseCountedInstance::num_instances_ - start_instances_;
+ }
+
+ // Returns the number of live BaseCountedInstance instances compared to when
+  // the InstanceTracker was constructed.
+ int live_instances() const {
+ return BaseCountedInstance::num_live_instances_ - start_live_instances_;
+ }
+
+ // Returns the number of moves on BaseCountedInstance objects since
+ // construction or since the last call to ResetCopiesMovesSwaps().
+ int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }
+
+ // Returns the number of copies on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int copies() const {
+ return BaseCountedInstance::num_copies_ - start_copies_;
+ }
+
+ // Returns the number of swaps on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
+
+ // Returns the number of comparisons on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int comparisons() const {
+ return BaseCountedInstance::num_comparisons_ - start_comparisons_;
+ }
+
+ // Resets the base values for moves, copies, comparisons, and swaps to the
+ // current values, so that subsequent Get*() calls for moves, copies,
+ // comparisons, and swaps will compare to the situation at the point of this
+ // call.
+ void ResetCopiesMovesSwaps() {
+ start_moves_ = BaseCountedInstance::num_moves_;
+ start_copies_ = BaseCountedInstance::num_copies_;
+ start_swaps_ = BaseCountedInstance::num_swaps_;
+ start_comparisons_ = BaseCountedInstance::num_comparisons_;
+ }
+
+ private:
+ int start_instances_;
+ int start_live_instances_;
+ int start_moves_;
+ int start_copies_;
+ int start_swaps_;
+ int start_comparisons_;
+};
+
+// Copyable, not movable.
+class CopyableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
+ CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
+
+ friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return false; }
+};
+
+// Copyable and movable.
+class CopyableMovableInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
+ CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
+ CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
+ CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
+ default;
+ CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
+
+ friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+// Only movable, not default-constructible.
+class MovableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ MovableOnlyInstance(MovableOnlyInstance&& other) = default;
+ MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
+
+ friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+} // namespace test_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
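
A sketch of the intended usage, assuming googletest is available and the test_instance_tracker translation unit (which defines the static counters) is linked in; the test name is hypothetical. The tracker snapshots the global counters at construction and aborts at destruction if any instances leaked:

#include <vector>

#include "gtest/gtest.h"
#include "absl/container/internal/test_instance_tracker.h"

namespace {
using ::absl::test_internal::CopyableMovableInstance;
using ::absl::test_internal::InstanceTracker;

TEST(InstanceTrackerSketch, PushBackMovesRatherThanCopies) {
  InstanceTracker tracker;  // baseline for the counters
  std::vector<CopyableMovableInstance> v;
  v.reserve(1);  // keep reallocation out of the move count
  v.push_back(CopyableMovableInstance(1));  // temporary is moved in
  EXPECT_EQ(tracker.copies(), 0);
  EXPECT_EQ(tracker.moves(), 1);
  // The moved-from temporary has been destroyed, so one live instance remains.
  EXPECT_EQ(tracker.live_instances(), 1);
}
}  // namespace
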
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/tracked.h b/contrib/restricted/abseil-cpp/absl/container/internal/tracked.h
index 29f5829f71..25d6046d2f 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/tracked.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/tracked.h
@@ -1,83 +1,83 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
-#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
-
-#include <stddef.h>
-
-#include <memory>
-#include <utility>
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+#include <memory>
+#include <utility>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// A class that tracks its copies and moves so that it can be queried in tests.
-template <class T>
-class Tracked {
- public:
- Tracked() {}
- // NOLINTNEXTLINE(runtime/explicit)
- Tracked(const T& val) : val_(val) {}
- Tracked(const Tracked& that)
- : val_(that.val_),
- num_moves_(that.num_moves_),
- num_copies_(that.num_copies_) {
- ++(*num_copies_);
- }
- Tracked(Tracked&& that)
- : val_(std::move(that.val_)),
- num_moves_(std::move(that.num_moves_)),
- num_copies_(std::move(that.num_copies_)) {
- ++(*num_moves_);
- }
-  Tracked& operator=(const Tracked& that) {
-    val_ = that.val_;
-    num_moves_ = that.num_moves_;
-    num_copies_ = that.num_copies_;
-    ++(*num_copies_);
-    return *this;
-  }
-  Tracked& operator=(Tracked&& that) {
-    val_ = std::move(that.val_);
-    num_moves_ = std::move(that.num_moves_);
-    num_copies_ = std::move(that.num_copies_);
-    ++(*num_moves_);
-    return *this;
-  }
-
- const T& val() const { return val_; }
-
- friend bool operator==(const Tracked& a, const Tracked& b) {
- return a.val_ == b.val_;
- }
- friend bool operator!=(const Tracked& a, const Tracked& b) {
- return !(a == b);
- }
-
- size_t num_copies() { return *num_copies_; }
- size_t num_moves() { return *num_moves_; }
-
- private:
- T val_;
- std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
- std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
-};
-
-} // namespace container_internal
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
+template <class T>
+class Tracked {
+ public:
+ Tracked() {}
+ // NOLINTNEXTLINE(runtime/explicit)
+ Tracked(const T& val) : val_(val) {}
+ Tracked(const Tracked& that)
+ : val_(that.val_),
+ num_moves_(that.num_moves_),
+ num_copies_(that.num_copies_) {
+ ++(*num_copies_);
+ }
+ Tracked(Tracked&& that)
+ : val_(std::move(that.val_)),
+ num_moves_(std::move(that.num_moves_)),
+ num_copies_(std::move(that.num_copies_)) {
+ ++(*num_moves_);
+ }
+  Tracked& operator=(const Tracked& that) {
+    val_ = that.val_;
+    num_moves_ = that.num_moves_;
+    num_copies_ = that.num_copies_;
+    ++(*num_copies_);
+    return *this;
+  }
+  Tracked& operator=(Tracked&& that) {
+    val_ = std::move(that.val_);
+    num_moves_ = std::move(that.num_moves_);
+    num_copies_ = std::move(that.num_copies_);
+    ++(*num_moves_);
+    return *this;
+  }
+
+ const T& val() const { return val_; }
+
+ friend bool operator==(const Tracked& a, const Tracked& b) {
+ return a.val_ == b.val_;
+ }
+ friend bool operator!=(const Tracked& a, const Tracked& b) {
+ return !(a == b);
+ }
+
+ size_t num_copies() { return *num_copies_; }
+ size_t num_moves() { return *num_moves_; }
+
+ private:
+ T val_;
+ std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+ std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
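
A sketch of Tracked<T> in use, assuming googletest is available; the test name is hypothetical. The counters live behind shared_ptrs that copies and moves share, so any surviving handle can observe the running totals:

#include <utility>

#include "gtest/gtest.h"
#include "absl/container/internal/tracked.h"

namespace {
using ::absl::container_internal::Tracked;

TEST(TrackedSketch, CountsCopiesAndMoves) {
  Tracked<int> a(7);
  Tracked<int> b = a;             // copy: shares (and bumps) the counters
  Tracked<int> c = std::move(b);  // move: b must not be used afterwards
  EXPECT_EQ(a.num_copies(), 1u);  // totals are visible through `a` as well
  EXPECT_EQ(a.num_moves(), 1u);
  EXPECT_EQ(c.val(), 7);
}
}  // namespace
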
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
index c1d20f3c52..ef7d02564b 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -1,494 +1,494 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
-
-#include <algorithm>
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
#include <unordered_map>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/internal/hash_generator_testing.h"
-#include "absl/container/internal/hash_policy_testing.h"
-
-namespace absl {
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordMap>
-class ConstructorTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(ConstructorTest);
-
-TYPED_TEST_P(ConstructorTest, NoArgs) {
- TypeParam m;
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCount) {
- TypeParam m(123);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHash) {
- using H = typename TypeParam::hasher;
- H hasher;
- TypeParam m(123, hasher);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- H hasher;
- E equal;
- TypeParam m(123, hasher, equal);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(123, hasher, equal, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-template <typename T>
-struct is_std_unordered_map : std::false_type {};
-
-template <typename... T>
-struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
-
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
-using has_cxx14_std_apis = std::true_type;
-#else
-using has_cxx14_std_apis = std::false_type;
-#endif
-
-template <typename T>
-using expect_cxx14_apis =
- absl::disjunction<absl::negation<is_std_unordered_map<T>>,
- has_cxx14_std_apis>;
-
-template <typename TypeParam>
-void BucketCountAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void BucketCountAllocTest(std::true_type) {
- using A = typename TypeParam::allocator_type;
- A alloc(0);
- TypeParam m(123, alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
- BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-template <typename TypeParam>
-void BucketCountHashAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void BucketCountHashAllocTest(std::true_type) {
- using H = typename TypeParam::hasher;
- using A = typename TypeParam::allocator_type;
- H hasher;
- A alloc(0);
- TypeParam m(123, hasher, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
- BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
-using has_alloc_std_constructors = std::true_type;
-#else
-using has_alloc_std_constructors = std::false_type;
-#endif
-
-template <typename T>
-using expect_alloc_constructors =
- absl::disjunction<absl::negation<is_std_unordered_map<T>>,
- has_alloc_std_constructors>;
-
-template <typename TypeParam>
-void AllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void AllocTest(std::true_type) {
- using A = typename TypeParam::allocator_type;
- A alloc(0);
- TypeParam m(alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(m, ::testing::UnorderedElementsAre());
-}
-
-TYPED_TEST_P(ConstructorTest, Alloc) {
- AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
-}
-
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_map : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
+
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
hash_internal::UniqueGenerator<T>());
- TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-template <typename TypeParam>
-void InputIteratorBucketAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InputIteratorBucketAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using A = typename TypeParam::allocator_type;
- A alloc(0);
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
hash_internal::UniqueGenerator<T>());
- TypeParam m(values.begin(), values.end(), 123, alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
- InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-template <typename TypeParam>
-void InputIteratorBucketHashAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InputIteratorBucketHashAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using A = typename TypeParam::allocator_type;
- H hasher;
- A alloc(0);
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
hash_internal::UniqueGenerator<T>());
- TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
- InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-TYPED_TEST_P(ConstructorTest, CopyConstructor) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- TypeParam m(123, hasher, equal, alloc);
+ TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
- TypeParam n(m);
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-template <typename TypeParam>
-void CopyConstructorAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void CopyConstructorAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- TypeParam m(123, hasher, equal, alloc);
+ TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
- TypeParam n(m, A(11));
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_NE(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
- CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
-}
-
-// TODO(alkis): Test non-propagating allocators on copy constructors.
-
-TYPED_TEST_P(ConstructorTest, MoveConstructor) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- TypeParam m(123, hasher, equal, alloc);
+ TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
- TypeParam t(m);
- TypeParam n(std::move(t));
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-template <typename TypeParam>
-void MoveConstructorAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void MoveConstructorAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- TypeParam m(123, hasher, equal, alloc);
+ TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
- TypeParam t(m);
- TypeParam n(std::move(t), A(1));
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_NE(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
- MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
-}
-
-// TODO(alkis): Test non-propagating allocators on move constructors.
-
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
- using T = hash_internal::GeneratedType<TypeParam>;
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(values, 123, hasher, equal, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-template <typename TypeParam>
-void InitializerListBucketAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InitializerListBucketAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using A = typename TypeParam::allocator_type;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
hash_internal::UniqueGenerator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- A alloc(0);
- TypeParam m(values, 123, alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
- InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-template <typename TypeParam>
-void InitializerListBucketHashAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InitializerListBucketHashAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using A = typename TypeParam::allocator_type;
- H hasher;
- A alloc(0);
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m(values, 123, hasher, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
- InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-TYPED_TEST_P(ConstructorTest, Assignment) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, Assignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
- TypeParam n;
- n = m;
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m, n);
-}
-
-// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
-// (it depends on traits).
-
-TYPED_TEST_P(ConstructorTest, MoveAssignment) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
hash_internal::UniqueGenerator<T> gen;
- TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
- TypeParam t(m);
- TypeParam n;
- n = std::move(t);
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
- using T = hash_internal::GeneratedType<TypeParam>;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m;
- m = values;
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
- using T = hash_internal::GeneratedType<TypeParam>;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
- TypeParam m({gen(), gen(), gen()});
- TypeParam n({gen()});
- n = m;
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
- using T = hash_internal::GeneratedType<TypeParam>;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
- TypeParam m({gen(), gen(), gen()});
- TypeParam t(m);
- TypeParam n({gen()});
- n = std::move(t);
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
- using T = hash_internal::GeneratedType<TypeParam>;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m;
- m = values;
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
- using T = hash_internal::GeneratedType<TypeParam>;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m(values);
- m = *&m; // Avoid -Wself-assign
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
-}
-
-// We cannot test self-move, as the standard states that it leaves standard
-// containers in an unspecified state (and in practice it causes a memory
-// leak according to the heap-checker!).
-
-REGISTER_TYPED_TEST_CASE_P(
- ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
- BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
- InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
- InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
- MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
- InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
- MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
- MoveAssignmentOverwritesExisting,
- AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
-
-} // namespace container_internal
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+// We cannot test self-move, as the standard states that it leaves standard
+// containers in an unspecified state (and in practice it causes a memory
+// leak according to the heap-checker!).
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
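
The constructor tests in this file run against both the Abseil containers and std::unordered_map, so C++14-only overloads such as the (bucket_count, allocator) constructor are gated through tag dispatch: expect_cxx14_apis<T>() yields std::true_type or std::false_type, and overload resolution picks either the real test body or a no-op. A minimal standalone sketch of that idiom follows; the names are illustrative, not part of the diff.

#include <iostream>
#include <type_traits>
#include <unordered_map>

// No-op overload: selected when the library lacks the C++14 API, so the
// suite still compiles and the test silently degrades to a pass.
template <typename Map>
void BucketCountAllocSketch(std::false_type) {}

// Real overload: exercises the C++14 (bucket_count, allocator) constructor.
template <typename Map>
void BucketCountAllocSketch(std::true_type) {
  typename Map::allocator_type alloc;
  Map m(123, alloc);
  std::cout << "bucket_count >= 123: " << (m.bucket_count() >= 123) << "\n";
}

int main() {
  // In the real harness the tag comes from expect_cxx14_apis<Map>();
  // here we assume the API is available.
  BucketCountAllocSketch<std::unordered_map<int, int>>(std::true_type{});
}
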
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
index e76421e508..897e366e4b 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
@@ -1,117 +1,117 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/internal/hash_generator_testing.h"
-#include "absl/container/internal/hash_policy_testing.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordMap>
-class LookupTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(LookupTest);
-
-TYPED_TEST_P(LookupTest, At) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- for (const auto& p : values) {
- const auto& val = m.at(p.first);
- EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
- }
-}
-
-TYPED_TEST_P(LookupTest, OperatorBracket) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& p : values) {
- auto& val = m[p.first];
- EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
- val = p.second;
- }
- for (const auto& p : values)
- EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
-}
-
-TYPED_TEST_P(LookupTest, Count) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& p : values)
- EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
- m.insert(values.begin(), values.end());
- for (const auto& p : values)
- EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
-}
-
-TYPED_TEST_P(LookupTest, Find) {
- using std::get;
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& p : values)
- EXPECT_TRUE(m.end() == m.find(p.first))
- << ::testing::PrintToString(p.first);
- m.insert(values.begin(), values.end());
- for (const auto& p : values) {
- auto it = m.find(p.first);
- EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
- EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
- }
-}
-
-TYPED_TEST_P(LookupTest, EqualRange) {
- using std::get;
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& p : values) {
- auto r = m.equal_range(p.first);
- ASSERT_EQ(0, std::distance(r.first, r.second));
- }
- m.insert(values.begin(), values.end());
- for (const auto& p : values) {
- auto r = m.equal_range(p.first);
- ASSERT_EQ(1, std::distance(r.first, r.second));
- EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
- }
-}
-
-REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
- EqualRange);
-
-} // namespace container_internal
+namespace container_internal {
+
+template <class UnordMap>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, At) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ for (const auto& p : values) {
+ const auto& val = m.at(p.first);
+ EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, OperatorBracket) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto& val = m[p.first];
+ EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
+ val = p.second;
+ }
+ for (const auto& p : values)
+ EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values)
+ EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_TRUE(m.end() == m.find(p.first))
+ << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto it = m.find(p.first);
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
+ EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
+ EqualRange);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
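
The lookup tests above pin down the unique-key invariants: count() is 0 or 1, find() returns end() for absent keys, equal_range() spans at most one element, and operator[] value-initializes the mapped value on first access. A minimal sketch of the same checks against std::unordered_map (not part of the diff):

#include <cassert>
#include <iterator>
#include <unordered_map>

int main() {
  std::unordered_map<int, int> m;
  assert(m.count(1) == 0);       // absent key
  assert(m.find(1) == m.end());

  m.insert({1, 10});
  auto r = m.equal_range(1);
  assert(std::distance(r.first, r.second) == 1);  // unique keys: range of 1
  assert(r.first->second == 10);

  assert(m[2] == 0);    // operator[] value-initializes the mapped value
  assert(m.at(1) == 10);
}
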
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_members_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_members_test.h
index 7d48cdb890..03531eb2fc 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_members_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_members_test.h
@@ -1,87 +1,87 @@
-// Copyright 2019 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
-
-#include <type_traits>
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/meta/type_traits.h"
-
-namespace absl {
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordMap>
-class MembersTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(MembersTest);
-
-template <typename T>
-void UseType() {}
-
-TYPED_TEST_P(MembersTest, Typedefs) {
- EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
- typename TypeParam::mapped_type>,
- typename TypeParam::value_type>()));
- EXPECT_TRUE((absl::conjunction<
- absl::negation<std::is_signed<typename TypeParam::size_type>>,
- std::is_integral<typename TypeParam::size_type>>()));
- EXPECT_TRUE((absl::conjunction<
- std::is_signed<typename TypeParam::difference_type>,
- std::is_integral<typename TypeParam::difference_type>>()));
- EXPECT_TRUE((std::is_convertible<
- decltype(std::declval<const typename TypeParam::hasher&>()(
- std::declval<const typename TypeParam::key_type&>())),
- size_t>()));
- EXPECT_TRUE((std::is_convertible<
- decltype(std::declval<const typename TypeParam::key_equal&>()(
- std::declval<const typename TypeParam::key_type&>(),
- std::declval<const typename TypeParam::key_type&>())),
- bool>()));
- EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
- typename TypeParam::value_type>()));
- EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
- typename TypeParam::reference>()));
- EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
- typename TypeParam::const_reference>()));
- EXPECT_TRUE((std::is_same<typename std::allocator_traits<
- typename TypeParam::allocator_type>::pointer,
- typename TypeParam::pointer>()));
- EXPECT_TRUE(
- (std::is_same<typename std::allocator_traits<
- typename TypeParam::allocator_type>::const_pointer,
- typename TypeParam::const_pointer>()));
-}
-
-TYPED_TEST_P(MembersTest, SimpleFunctions) {
- EXPECT_GT(TypeParam().max_size(), 0);
-}
-
-TYPED_TEST_P(MembersTest, BeginEnd) {
- TypeParam t = {typename TypeParam::value_type{}};
- EXPECT_EQ(t.begin(), t.cbegin());
- EXPECT_EQ(t.end(), t.cend());
- EXPECT_NE(t.begin(), t.end());
- EXPECT_NE(t.cbegin(), t.cend());
-}
-
-REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
-
-} // namespace container_internal
+namespace container_internal {
+
+template <class UnordMap>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
+ typename TypeParam::mapped_type>,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
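
The Typedefs test states its type requirements through EXPECT_TRUE so failures report through gtest, but the same requirements can be written as compile-time static_asserts. A sketch against a concrete map type (illustrative, not part of the diff):

#include <type_traits>
#include <unordered_map>
#include <utility>

using Map = std::unordered_map<int, double>;

// value_type pairs a const key with the mapped type.
static_assert(std::is_same<std::pair<const int, double>,
                           Map::value_type>::value, "");

// size_type is an unsigned integral type.
static_assert(std::is_integral<Map::size_type>::value &&
              !std::is_signed<Map::size_type>::value, "");

// difference_type is a signed integral type.
static_assert(std::is_integral<Map::difference_type>::value &&
              std::is_signed<Map::difference_type>::value, "");

int main() {}  // everything above is checked at compile time
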
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
index d3543936f7..4c2050d5bd 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
@@ -1,86 +1,86 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
-
-#include <memory>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/internal/hash_generator_testing.h"
-#include "absl/container/internal/hash_policy_testing.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordMap>
-class ModifiersTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(ModifiersTest);
-
-TYPED_TEST_P(ModifiersTest, Clear) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- m.clear();
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
- EXPECT_TRUE(m.empty());
-}
-
-TYPED_TEST_P(ModifiersTest, Insert) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- auto p = m.insert(val);
- EXPECT_TRUE(p.second);
- EXPECT_EQ(val, *p.first);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- p = m.insert(val2);
- EXPECT_FALSE(p.second);
- EXPECT_EQ(val, *p.first);
-}
-
-TYPED_TEST_P(ModifiersTest, InsertHint) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- auto it = m.insert(m.end(), val);
- EXPECT_TRUE(it != m.end());
- EXPECT_EQ(val, *it);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- it = m.insert(it, val2);
- EXPECT_TRUE(it != m.end());
- EXPECT_EQ(val, *it);
-}
-
-TYPED_TEST_P(ModifiersTest, InsertRange) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- m.insert(values.begin(), values.end());
- ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
-}
-
+namespace container_internal {
+
+template <class UnordMap>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.insert(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.insert(it, val2);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
@@ -113,239 +113,239 @@ TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
#endif
}
-TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
-#ifdef UNORDERED_MAP_CXX17
- using std::get;
- using K = typename TypeParam::key_type;
- using V = typename TypeParam::mapped_type;
- K k = hash_internal::Generator<K>()();
- V val = hash_internal::Generator<V>()();
- TypeParam m;
- auto p = m.insert_or_assign(k, val);
- EXPECT_TRUE(p.second);
- EXPECT_EQ(k, get<0>(*p.first));
- EXPECT_EQ(val, get<1>(*p.first));
- V val2 = hash_internal::Generator<V>()();
- p = m.insert_or_assign(k, val2);
- EXPECT_FALSE(p.second);
- EXPECT_EQ(k, get<0>(*p.first));
- EXPECT_EQ(val2, get<1>(*p.first));
-#endif
-}
-
-TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
-#ifdef UNORDERED_MAP_CXX17
- using std::get;
- using K = typename TypeParam::key_type;
- using V = typename TypeParam::mapped_type;
- K k = hash_internal::Generator<K>()();
- V val = hash_internal::Generator<V>()();
- TypeParam m;
- auto it = m.insert_or_assign(m.end(), k, val);
- EXPECT_TRUE(it != m.end());
- EXPECT_EQ(k, get<0>(*it));
- EXPECT_EQ(val, get<1>(*it));
- V val2 = hash_internal::Generator<V>()();
- it = m.insert_or_assign(it, k, val2);
- EXPECT_EQ(k, get<0>(*it));
- EXPECT_EQ(val2, get<1>(*it));
-#endif
-}
-
-TYPED_TEST_P(ModifiersTest, Emplace) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
- // with test traits/policy.
- auto p = m.emplace(val);
- EXPECT_TRUE(p.second);
- EXPECT_EQ(val, *p.first);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- p = m.emplace(val2);
- EXPECT_FALSE(p.second);
- EXPECT_EQ(val, *p.first);
-}
-
-TYPED_TEST_P(ModifiersTest, EmplaceHint) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
- // with test traits/policy.
- auto it = m.emplace_hint(m.end(), val);
- EXPECT_EQ(val, *it);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- it = m.emplace_hint(it, val2);
- EXPECT_EQ(val, *it);
-}
-
-TYPED_TEST_P(ModifiersTest, TryEmplace) {
-#ifdef UNORDERED_MAP_CXX17
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
- // with test traits/policy.
- auto p = m.try_emplace(val.first, val.second);
- EXPECT_TRUE(p.second);
- EXPECT_EQ(val, *p.first);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- p = m.try_emplace(val2.first, val2.second);
- EXPECT_FALSE(p.second);
- EXPECT_EQ(val, *p.first);
-#endif
-}
-
-TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
-#ifdef UNORDERED_MAP_CXX17
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
- // with test traits/policy.
- auto it = m.try_emplace(m.end(), val.first, val.second);
- EXPECT_EQ(val, *it);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- it = m.try_emplace(it, val2.first, val2.second);
- EXPECT_EQ(val, *it);
-#endif
-}
-
-template <class V>
-using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
-
-// In openmap we chose not to return the iterator from erase because that's
-// more expensive. As such we adapt erase to return an iterator here.
-struct EraseFirst {
- template <class Map>
- auto operator()(Map* m, int) const
- -> IfNotVoid<decltype(m->erase(m->begin()))> {
- return m->erase(m->begin());
- }
- template <class Map>
- typename Map::iterator operator()(Map* m, ...) const {
- auto it = m->begin();
- m->erase(it++);
- return it;
- }
-};
-
-TYPED_TEST_P(ModifiersTest, Erase) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using std::get;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- auto& first = *m.begin();
- std::vector<T> values2;
- for (const auto& val : values)
- if (get<0>(val) != get<0>(first)) values2.push_back(val);
- auto it = EraseFirst()(&m, 0);
- ASSERT_TRUE(it != m.end());
- EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
- values2.end()));
-}
-
-TYPED_TEST_P(ModifiersTest, EraseRange) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- auto it = m.erase(m.begin(), m.end());
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
- EXPECT_TRUE(it == m.end());
-}
-
-TYPED_TEST_P(ModifiersTest, EraseKey) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_EQ(1, m.erase(values[0].first));
- EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
- EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
- values.end()));
-}
-
-TYPED_TEST_P(ModifiersTest, Swap) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> v1;
- std::vector<T> v2;
- std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
- std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
- TypeParam m1(v1.begin(), v1.end());
- TypeParam m2(v2.begin(), v2.end());
- EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
- EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
- m1.swap(m2);
- EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
- EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
-}
-
-// TODO(alkis): Write tests for extract.
-// TODO(alkis): Write tests for merge.
-
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto p = m.insert_or_assign(k, val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val, get<1>(*p.first));
+ V val2 = hash_internal::Generator<V>()();
+ p = m.insert_or_assign(k, val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val2, get<1>(*p.first));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto it = m.insert_or_assign(m.end(), k, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val, get<1>(*it));
+ V val2 = hash_internal::Generator<V>()();
+ it = m.insert_or_assign(it, k, val2);
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val2, get<1>(*it));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.emplace(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.emplace_hint(it, val2);
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.try_emplace(val.first, val.second);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, val2.second);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.try_emplace(m.end(), val.first, val.second);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.try_emplace(it, val2.first, val2.second);
+ EXPECT_EQ(val, *it);
+#endif
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such we adapt erase to return an iterator here.
+struct EraseFirst {
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using std::get;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto& first = *m.begin();
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (get<0>(val) != get<0>(first)) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0].first));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
InsertRange, InsertWithinCapacity,
InsertRangeWithinCapacity, InsertOrAssign,
InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace,
TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);
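
The EraseFirst adapter above relies on a classic two-overload ranking trick: the caller passes a literal 0, the int overload is the better match but SFINAEs away when erase(iterator) returns void, and the variadic fallback then recovers the successor iterator by hand. A standalone sketch of the same dispatch, assuming std::unordered_map (whose erase does return an iterator):

#include <iostream>
#include <type_traits>
#include <unordered_map>

// Preferred overload: valid only when erase(iterator) returns non-void.
template <class Map>
auto EraseFront(Map* m, int)
    -> typename std::enable_if<
        !std::is_void<decltype(m->erase(m->begin()))>::value,
        decltype(m->erase(m->begin()))>::type {
  return m->erase(m->begin());
}

// Fallback: save the successor before erasing, then return it.
template <class Map>
typename Map::iterator EraseFront(Map* m, ...) {
  auto it = m->begin();
  m->erase(it++);  // unordered erase only invalidates the erased element
  return it;
}

int main() {
  std::unordered_map<int, int> m{{1, 1}, {2, 2}};
  auto it = EraseFront(&m, 0);  // 0 is int, so the int overload wins if valid
  std::cout << "left: " << m.size() << ", at end: " << (it == m.end()) << "\n";
}
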
-
-template <typename Type>
-struct is_unique_ptr : std::false_type {};
-
-template <typename Type>
-struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};
-
-template <class UnordMap>
-class UniquePtrModifiersTest : public ::testing::Test {
- protected:
- UniquePtrModifiersTest() {
- static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
-                  "UniquePtrModifiersTest may only be called with a "
- "std::unique_ptr value type.");
- }
-};
-
+
+template <typename Type>
+struct is_unique_ptr : std::false_type {};
+
+template <typename Type>
+struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};
+
+template <class UnordMap>
+class UniquePtrModifiersTest : public ::testing::Test {
+ protected:
+ UniquePtrModifiersTest() {
+ static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
+                  "UniquePtrModifiersTest may only be called with a "
+ "std::unique_ptr value type.");
+ }
+};
+
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
-TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
-
-// Test that we do not move from rvalue arguments if an insertion does not
-// happen.
-TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
-#ifdef UNORDERED_MAP_CXX17
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- auto p = m.try_emplace(val.first, std::move(val.second));
- EXPECT_TRUE(p.second);
-  // A moved-from std::unique_ptr is guaranteed to be nullptr.
- EXPECT_EQ(val.second, nullptr);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- p = m.try_emplace(val2.first, std::move(val2.second));
- EXPECT_FALSE(p.second);
- EXPECT_NE(val2.second, nullptr);
-#endif
-}
-
-REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);
-
-} // namespace container_internal
+TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
+
+// Test that we do not move from rvalue arguments if an insertion does not
+// happen.
+TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.try_emplace(val.first, std::move(val.second));
+ EXPECT_TRUE(p.second);
+  // A moved-from std::unique_ptr is guaranteed to be nullptr.
+ EXPECT_EQ(val.second, nullptr);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, std::move(val2.second));
+ EXPECT_FALSE(p.second);
+ EXPECT_NE(val2.second, nullptr);
+#endif
+}
+
+REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
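
The UniquePtrModifiersTest above encodes a subtle C++17 guarantee: try_emplace does not move from its rvalue arguments unless the insertion actually takes place, which plain emplace does not promise. A minimal sketch of that behavior (not part of the diff):

#include <cassert>
#include <memory>
#include <unordered_map>

int main() {
  std::unordered_map<int, std::unique_ptr<int>> m;

  auto v1 = std::make_unique<int>(1);
  auto p = m.try_emplace(42, std::move(v1));
  assert(p.second && v1 == nullptr);   // inserted: argument was consumed

  auto v2 = std::make_unique<int>(2);
  p = m.try_emplace(42, std::move(v2));
  assert(!p.second && v2 != nullptr);  // key existed: v2 left untouched
}
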
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
index 41165b05e9..70c2322fb6 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
@@ -1,496 +1,496 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
-
-#include <algorithm>
-#include <unordered_set>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/internal/hash_generator_testing.h"
-#include "absl/container/internal/hash_policy_testing.h"
-#include "absl/meta/type_traits.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <unordered_set>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordMap>
-class ConstructorTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(ConstructorTest);
-
-TYPED_TEST_P(ConstructorTest, NoArgs) {
- TypeParam m;
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCount) {
- TypeParam m(123);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHash) {
- using H = typename TypeParam::hasher;
- H hasher;
- TypeParam m(123, hasher);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- H hasher;
- E equal;
- TypeParam m(123, hasher, equal);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(123, hasher, equal, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-
- const auto& cm = m;
- EXPECT_EQ(cm.hash_function(), hasher);
- EXPECT_EQ(cm.key_eq(), equal);
- EXPECT_EQ(cm.get_allocator(), alloc);
- EXPECT_TRUE(cm.empty());
- EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
- EXPECT_GE(cm.bucket_count(), 123);
-}
-
-template <typename T>
-struct is_std_unordered_set : std::false_type {};
-
-template <typename... T>
-struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
-
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
-using has_cxx14_std_apis = std::true_type;
-#else
-using has_cxx14_std_apis = std::false_type;
-#endif
-
-template <typename T>
-using expect_cxx14_apis =
- absl::disjunction<absl::negation<is_std_unordered_set<T>>,
- has_cxx14_std_apis>;
-
-template <typename TypeParam>
-void BucketCountAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void BucketCountAllocTest(std::true_type) {
- using A = typename TypeParam::allocator_type;
- A alloc(0);
- TypeParam m(123, alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
- BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-template <typename TypeParam>
-void BucketCountHashAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void BucketCountHashAllocTest(std::true_type) {
- using H = typename TypeParam::hasher;
- using A = typename TypeParam::allocator_type;
- H hasher;
- A alloc(0);
- TypeParam m(123, hasher, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
- BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
-using has_alloc_std_constructors = std::true_type;
-#else
-using has_alloc_std_constructors = std::false_type;
-#endif
-
-template <typename T>
-using expect_alloc_constructors =
- absl::disjunction<absl::negation<is_std_unordered_set<T>>,
- has_alloc_std_constructors>;
-
-template <typename TypeParam>
-void AllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void AllocTest(std::true_type) {
- using A = typename TypeParam::allocator_type;
- A alloc(0);
- TypeParam m(alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_TRUE(m.empty());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
-}
-
-TYPED_TEST_P(ConstructorTest, Alloc) {
- AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
-}
-
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- std::vector<T> values;
- for (size_t i = 0; i != 10; ++i)
- values.push_back(hash_internal::Generator<T>()());
- TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-template <typename TypeParam>
-void InputIteratorBucketAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InputIteratorBucketAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using A = typename TypeParam::allocator_type;
- A alloc(0);
- std::vector<T> values;
- for (size_t i = 0; i != 10; ++i)
- values.push_back(hash_internal::Generator<T>()());
- TypeParam m(values.begin(), values.end(), 123, alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
- InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-template <typename TypeParam>
-void InputIteratorBucketHashAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InputIteratorBucketHashAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using A = typename TypeParam::allocator_type;
- H hasher;
- A alloc(0);
- std::vector<T> values;
- for (size_t i = 0; i != 10; ++i)
- values.push_back(hash_internal::Generator<T>()());
- TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
- InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-TYPED_TEST_P(ConstructorTest, CopyConstructor) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
- TypeParam n(m);
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
- EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
-}
-
-template <typename TypeParam>
-void CopyConstructorAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void CopyConstructorAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
- TypeParam n(m, A(11));
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_NE(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
- CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
-}
-
-// TODO(alkis): Test non-propagating allocators on copy constructors.
-
-TYPED_TEST_P(ConstructorTest, MoveConstructor) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
- TypeParam t(m);
- TypeParam n(std::move(t));
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-template <typename TypeParam>
-void MoveConstructorAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void MoveConstructorAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
- TypeParam t(m);
- TypeParam n(std::move(t), A(1));
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_NE(m.get_allocator(), n.get_allocator());
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
- MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
-}
-
-// TODO(alkis): Test non-propagating allocators on move constructors.
-
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
- using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- TypeParam m(values, 123, hasher, equal, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.key_eq(), equal);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-template <typename TypeParam>
-void InitializerListBucketAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InitializerListBucketAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using A = typename TypeParam::allocator_type;
- hash_internal::Generator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- A alloc(0);
- TypeParam m(values, 123, alloc);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
- InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-template <typename TypeParam>
-void InitializerListBucketHashAllocTest(std::false_type) {}
-
-template <typename TypeParam>
-void InitializerListBucketHashAllocTest(std::true_type) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using A = typename TypeParam::allocator_type;
- H hasher;
- A alloc(0);
- hash_internal::Generator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m(values, 123, hasher, alloc);
- EXPECT_EQ(m.hash_function(), hasher);
- EXPECT_EQ(m.get_allocator(), alloc);
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_GE(m.bucket_count(), 123);
-}
-
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
- InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
-}
-
-TYPED_TEST_P(ConstructorTest, CopyAssignment) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- hash_internal::Generator<T> gen;
- TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
- TypeParam n;
- n = m;
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m, n);
-}
-
-// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
-// (it depends on traits).
-
-TYPED_TEST_P(ConstructorTest, MoveAssignment) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using H = typename TypeParam::hasher;
- using E = typename TypeParam::key_equal;
- using A = typename TypeParam::allocator_type;
- H hasher;
- E equal;
- A alloc(0);
- hash_internal::Generator<T> gen;
- TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
- TypeParam t(m);
- TypeParam n;
- n = std::move(t);
- EXPECT_EQ(m.hash_function(), n.hash_function());
- EXPECT_EQ(m.key_eq(), n.key_eq());
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
- using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m;
- m = values;
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
- using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
- TypeParam m({gen(), gen(), gen()});
- TypeParam n({gen()});
- n = m;
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
- using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
- TypeParam m({gen(), gen(), gen()});
- TypeParam t(m);
- TypeParam n({gen()});
- n = std::move(t);
- EXPECT_EQ(m, n);
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
- using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m;
- m = values;
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
-}
-
-TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
- using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
- std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
- TypeParam m(values);
- m = *&m; // Avoid -Wself-assign.
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
-}
-
-REGISTER_TYPED_TEST_CASE_P(
- ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
- BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
- InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
- InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
- MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
- InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
- MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
- MoveAssignmentOverwritesExisting,
- AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
-
-} // namespace container_internal
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+
+ const auto& cm = m;
+ EXPECT_EQ(cm.hash_function(), hasher);
+ EXPECT_EQ(cm.key_eq(), equal);
+ EXPECT_EQ(cm.get_allocator(), alloc);
+ EXPECT_TRUE(cm.empty());
+ EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
+ EXPECT_GE(cm.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_set : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
+
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+ EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign.
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
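
The constructor suite above gates the allocator-taking constructors behind tag dispatch: expect_cxx14_apis and expect_alloc_constructors resolve to std::true_type or std::false_type, and each test helper provides a no-op std::false_type overload, so the suite silently degrades on standard libraries that predate the C++14 constructor overloads. A minimal, self-contained sketch of the idiom; the names feature_available and BucketAllocCheck are illustrative, not from the file:

#include <iostream>
#include <type_traits>
#include <unordered_set>

// Stand-in for has_cxx14_std_apis; flip to std::false_type to skip the body.
using feature_available = std::true_type;

template <typename Set>
void BucketAllocCheck(std::false_type) {}  // no-op when the API is unavailable

template <typename Set>
void BucketAllocCheck(std::true_type) {
  typename Set::allocator_type alloc;
  Set s(123, alloc);  // unordered_set(size_type, const allocator_type&) is C++14
  std::cout << "bucket_count >= 123: " << (s.bucket_count() >= 123) << "\n";
}

int main() {
  BucketAllocCheck<std::unordered_set<int>>(feature_available{});
}

The real suite threads the same tag through expect_cxx14_apis so a single TYPED_TEST_P body covers both standard-library modes without any preprocessor branching inside the test.
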
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
index 8f2f4b207e..9545b8cf22 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
@@ -1,91 +1,91 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/internal/hash_generator_testing.h"
-#include "absl/container/internal/hash_policy_testing.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordSet>
-class LookupTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(LookupTest);
-
-TYPED_TEST_P(LookupTest, Count) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& v : values)
- EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
- m.insert(values.begin(), values.end());
- for (const auto& v : values)
- EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
-}
-
-TYPED_TEST_P(LookupTest, Find) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& v : values)
- EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
- m.insert(values.begin(), values.end());
- for (const auto& v : values) {
- typename TypeParam::iterator it = m.find(v);
- static_assert(std::is_same<const typename TypeParam::value_type&,
- decltype(*it)>::value,
- "");
- static_assert(std::is_same<const typename TypeParam::value_type*,
- decltype(it.operator->())>::value,
- "");
- EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
- EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
- }
-}
-
-TYPED_TEST_P(LookupTest, EqualRange) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- for (const auto& v : values) {
- auto r = m.equal_range(v);
- ASSERT_EQ(0, std::distance(r.first, r.second));
- }
- m.insert(values.begin(), values.end());
- for (const auto& v : values) {
- auto r = m.equal_range(v);
- ASSERT_EQ(1, std::distance(r.first, r.second));
- EXPECT_EQ(v, *r.first);
- }
-}
-
-REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
-
-} // namespace container_internal
+namespace container_internal {
+
+template <class UnordSet>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values)
+ EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ typename TypeParam::iterator it = m.find(v);
+ static_assert(std::is_same<const typename TypeParam::value_type&,
+ decltype(*it)>::value,
+ "");
+ static_assert(std::is_same<const typename TypeParam::value_type*,
+ decltype(it.operator->())>::value,
+ "");
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
+ EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(v, *r.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
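
The static_asserts in LookupTest's Find test pin down a property worth spelling out: a set's elements are its keys, so even the non-const iterator must expose them as const, otherwise an in-place mutation could silently change an element's hash and strand it in the wrong bucket. A short sketch of the same check against plain std::unordered_set<int> (an assumed stand-in for the typed test parameter):

#include <type_traits>
#include <unordered_set>

int main() {
  std::unordered_set<int> s = {1, 2, 3};
  auto it = s.find(2);
  // Mutable-looking iterator, but dereferencing yields a const reference:
  // rewriting a stored key in place would invalidate its bucket placement.
  static_assert(std::is_same<decltype(*it), const int&>::value,
                "set iterators expose elements as const");
  return *it == 2 ? 0 : 1;
}
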
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_members_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_members_test.h
index 4c5e104af2..86fe0ddcab 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_members_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_members_test.h
@@ -1,86 +1,86 @@
-// Copyright 2019 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
-
-#include <type_traits>
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/meta/type_traits.h"
-
-namespace absl {
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordSet>
-class MembersTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(MembersTest);
-
-template <typename T>
-void UseType() {}
-
-TYPED_TEST_P(MembersTest, Typedefs) {
- EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
- typename TypeParam::value_type>()));
- EXPECT_TRUE((absl::conjunction<
- absl::negation<std::is_signed<typename TypeParam::size_type>>,
- std::is_integral<typename TypeParam::size_type>>()));
- EXPECT_TRUE((absl::conjunction<
- std::is_signed<typename TypeParam::difference_type>,
- std::is_integral<typename TypeParam::difference_type>>()));
- EXPECT_TRUE((std::is_convertible<
- decltype(std::declval<const typename TypeParam::hasher&>()(
- std::declval<const typename TypeParam::key_type&>())),
- size_t>()));
- EXPECT_TRUE((std::is_convertible<
- decltype(std::declval<const typename TypeParam::key_equal&>()(
- std::declval<const typename TypeParam::key_type&>(),
- std::declval<const typename TypeParam::key_type&>())),
- bool>()));
- EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
- typename TypeParam::value_type>()));
- EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
- typename TypeParam::reference>()));
- EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
- typename TypeParam::const_reference>()));
- EXPECT_TRUE((std::is_same<typename std::allocator_traits<
- typename TypeParam::allocator_type>::pointer,
- typename TypeParam::pointer>()));
- EXPECT_TRUE(
- (std::is_same<typename std::allocator_traits<
- typename TypeParam::allocator_type>::const_pointer,
- typename TypeParam::const_pointer>()));
-}
-
-TYPED_TEST_P(MembersTest, SimpleFunctions) {
- EXPECT_GT(TypeParam().max_size(), 0);
-}
-
-TYPED_TEST_P(MembersTest, BeginEnd) {
- TypeParam t = {typename TypeParam::value_type{}};
- EXPECT_EQ(t.begin(), t.cbegin());
- EXPECT_EQ(t.end(), t.cend());
- EXPECT_NE(t.begin(), t.end());
- EXPECT_NE(t.cbegin(), t.cend());
-}
-
-REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
-
-} // namespace container_internal
+namespace container_internal {
+
+template <class UnordSet>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
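
MembersTest's Typedefs test expresses its requirements through absl::conjunction and absl::negation; the same constraints can be written with the standard traits alone, which makes the intent easier to see at a glance. A sketch restricted to the size_type/difference_type requirements, with the hypothetical helper name CheckSizeTypes:

#include <type_traits>
#include <unordered_set>

template <class Set>
void CheckSizeTypes() {
  // size_type must be an unsigned integral type...
  static_assert(std::is_integral<typename Set::size_type>::value &&
                    !std::is_signed<typename Set::size_type>::value,
                "size_type is unsigned integral");
  // ...and difference_type a signed integral type, as Typedefs asserts.
  static_assert(std::is_integral<typename Set::difference_type>::value &&
                    std::is_signed<typename Set::difference_type>::value,
                "difference_type is signed integral");
}

int main() { CheckSizeTypes<std::unordered_set<int>>(); }
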
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
index 6e473e45da..ce7dd1a334 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
@@ -1,79 +1,79 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
-#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/internal/hash_generator_testing.h"
-#include "absl/container/internal/hash_policy_testing.h"
-
-namespace absl {
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-template <class UnordSet>
-class ModifiersTest : public ::testing::Test {};
-
-TYPED_TEST_SUITE_P(ModifiersTest);
-
-TYPED_TEST_P(ModifiersTest, Clear) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- m.clear();
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_TRUE(m.empty());
-}
-
-TYPED_TEST_P(ModifiersTest, Insert) {
- using T = hash_internal::GeneratedType<TypeParam>;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- auto p = m.insert(val);
- EXPECT_TRUE(p.second);
- EXPECT_EQ(val, *p.first);
- p = m.insert(val);
- EXPECT_FALSE(p.second);
-}
-
-TYPED_TEST_P(ModifiersTest, InsertHint) {
- using T = hash_internal::GeneratedType<TypeParam>;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- auto it = m.insert(m.end(), val);
- EXPECT_TRUE(it != m.end());
- EXPECT_EQ(val, *it);
- it = m.insert(it, val);
- EXPECT_TRUE(it != m.end());
- EXPECT_EQ(val, *it);
-}
-
-TYPED_TEST_P(ModifiersTest, InsertRange) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m;
- m.insert(values.begin(), values.end());
- ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
-}
-
+namespace container_internal {
+
+template <class UnordSet>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.insert(val);
+ EXPECT_FALSE(p.second);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ it = m.insert(it, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
@@ -104,118 +104,118 @@ TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
#endif
}
-TYPED_TEST_P(ModifiersTest, Emplace) {
- using T = hash_internal::GeneratedType<TypeParam>;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
- // with test traits/policy.
- auto p = m.emplace(val);
- EXPECT_TRUE(p.second);
- EXPECT_EQ(val, *p.first);
- p = m.emplace(val);
- EXPECT_FALSE(p.second);
- EXPECT_EQ(val, *p.first);
-}
-
-TYPED_TEST_P(ModifiersTest, EmplaceHint) {
- using T = hash_internal::GeneratedType<TypeParam>;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
- // with test traits/policy.
- auto it = m.emplace_hint(m.end(), val);
- EXPECT_EQ(val, *it);
- it = m.emplace_hint(it, val);
- EXPECT_EQ(val, *it);
-}
-
-template <class V>
-using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
-
-// In openmap we chose not to return the iterator from erase because that's
-// more expensive. As such we adapt erase to return an iterator here.
-struct EraseFirst {
- template <class Map>
- auto operator()(Map* m, int) const
- -> IfNotVoid<decltype(m->erase(m->begin()))> {
- return m->erase(m->begin());
- }
- template <class Map>
- typename Map::iterator operator()(Map* m, ...) const {
- auto it = m->begin();
- m->erase(it++);
- return it;
- }
-};
-
-TYPED_TEST_P(ModifiersTest, Erase) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- std::vector<T> values2;
- for (const auto& val : values)
- if (val != *m.begin()) values2.push_back(val);
- auto it = EraseFirst()(&m, 0);
- ASSERT_TRUE(it != m.end());
- EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
- values2.end()));
-}
-
-TYPED_TEST_P(ModifiersTest, EraseRange) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- auto it = m.erase(m.begin(), m.end());
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
- EXPECT_TRUE(it == m.end());
-}
-
-TYPED_TEST_P(ModifiersTest, EraseKey) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> values;
- std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
- TypeParam m(values.begin(), values.end());
- ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
- EXPECT_EQ(1, m.erase(values[0]));
- EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
- values.end()));
-}
-
-TYPED_TEST_P(ModifiersTest, Swap) {
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> v1;
- std::vector<T> v2;
- std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
- std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
- TypeParam m1(v1.begin(), v1.end());
- TypeParam m2(v2.begin(), v2.end());
- EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
- EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
- m1.swap(m2);
- EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
- EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
-}
-
-// TODO(alkis): Write tests for extract.
-// TODO(alkis): Write tests for merge.
-
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.emplace(val);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ it = m.emplace_hint(it, val);
+ EXPECT_EQ(val, *it);
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such we adapt erase to return an iterator here.
+struct EraseFirst {
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (val != *m.begin()) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0]));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
InsertRange, InsertWithinCapacity,
InsertRangeWithinCapacity, Emplace, EmplaceHint,
Erase, EraseRange, EraseKey, Swap);
-
-} // namespace container_internal
+
+} // namespace container_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
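
The EraseFirst adapter in the modifiers suite above is worth a standalone illustration: it prefers the iterator-returning erase(iterator) when the container provides one, and falls back to erase-then-advance otherwise, using enable_if plus the classic int-versus-ellipsis overload ranking. A self-contained sketch of that dispatch; standard std::unordered_set takes the preferred path:

#include <iostream>
#include <type_traits>
#include <unordered_set>

template <class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;

struct EraseFirst {
  // Preferred overload: viable only if erase(begin()) returns non-void.
  template <class Set>
  auto operator()(Set* s, int) const
      -> IfNotVoid<decltype(s->erase(s->begin()))> {
    return s->erase(s->begin());
  }
  // Fallback: an ellipsis parameter ranks below int in overload resolution,
  // so this is chosen only when the overload above is SFINAE'd out
  // (i.e. erase returns void).
  template <class Set>
  typename Set::iterator operator()(Set* s, ...) const {
    auto it = s->begin();
    s->erase(it++);  // erase the old position, keep the advanced iterator
    return it;
  }
};

int main() {
  std::unordered_set<int> s = {1, 2, 3};
  auto it = EraseFirst()(&s, 0);  // the literal 0 is int, so the first overload wins
  std::cout << "one element erased, " << s.size() << " remain\n";
  if (it != s.end()) std::cout << "iterator points at: " << *it << "\n";
  return 0;
}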