path: root/contrib/restricted/abseil-cpp/absl/container/internal
author     prime <prime@yandex-team.ru>  2022-02-10 16:46:01 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:46:01 +0300
commit     e34f3f0e381020a427f44fbd50463d9a04089db3 (patch)
tree       1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /contrib/restricted/abseil-cpp/absl/container/internal
parent     3695a7cd42b74a4987d8d5a8f2e2443556998943 (diff)
download   ydb-e34f3f0e381020a427f44fbd50463d9a04089db3.tar.gz
Restoring authorship annotation for <prime@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/container/internal')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make  70
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/btree.h  428
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h  106
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc  10
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h  4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc  4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h  160
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/layout.h  8
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc  14
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h  246
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make  72
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h  2
12 files changed, 562 insertions, 562 deletions
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make b/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
index 5a37c14c4a..1933289a6d 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
@@ -1,52 +1,52 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/synchronization
contrib/restricted/abseil-cpp/absl/synchronization/internal
contrib/restricted/abseil-cpp/absl/time
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
-
-SRCS(
+
+SRCS(
hashtablez_sampler.cc
hashtablez_sampler_force_weak_definition.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
index 60d6979c0f..f636c5fc73 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
@@ -192,38 +192,38 @@ struct key_compare_to_adapter<std::greater<absl::Cord>> {
using type = StringBtreeDefaultGreater;
};
-// Detects an 'absl_btree_prefer_linear_node_search' member. This is
-// a protocol used as an opt-in or opt-out of linear search.
-//
-// For example, this would be useful for key types that wrap an integer
-// and define their own cheap operator<(). For example:
-//
-// class K {
-// public:
-// using absl_btree_prefer_linear_node_search = std::true_type;
-// ...
-// private:
-// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
-// int k_;
-// };
-//
-// btree_map<K, V> m; // Uses linear search
-//
-// If T has the preference tag, then it has a preference.
-// Btree will use the tag's truth value.
-template <typename T, typename = void>
-struct has_linear_node_search_preference : std::false_type {};
-template <typename T, typename = void>
-struct prefers_linear_node_search : std::false_type {};
-template <typename T>
-struct has_linear_node_search_preference<
- T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
- : std::true_type {};
-template <typename T>
-struct prefers_linear_node_search<
- T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
- : T::absl_btree_prefer_linear_node_search {};
-
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+// For example, this would be useful for key types that wrap an integer
+// and define their own cheap operator<(). For example:
+//
+// class K {
+// public:
+// using absl_btree_prefer_linear_node_search = std::true_type;
+// ...
+// private:
+// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+// int k_;
+// };
+//
+// btree_map<K, V> m; // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+ T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+ T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : T::absl_btree_prefer_linear_node_search {};
+
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy>
struct common_params {
@@ -250,23 +250,23 @@ struct common_params {
using reference = value_type &;
using const_reference = const value_type &;
- // For the given lookup key type, returns whether we can have multiple
- // equivalent keys in the btree. If this is a multi-container, then we can.
- // Otherwise, we can have multiple equivalent keys only if all of the
- // following conditions are met:
- // - The comparator is transparent.
- // - The lookup key type is not the same as key_type.
- // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
- // that we know has the same equivalence classes for all lookup types.
- template <typename LookupKey>
- constexpr static bool can_have_multiple_equivalent_keys() {
- return Multi ||
- (IsTransparent<key_compare>::value &&
- !std::is_same<LookupKey, Key>::value &&
- !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
- !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
- }
-
+ // For the given lookup key type, returns whether we can have multiple
+ // equivalent keys in the btree. If this is a multi-container, then we can.
+ // Otherwise, we can have multiple equivalent keys only if all of the
+ // following conditions are met:
+ // - The comparator is transparent.
+ // - The lookup key type is not the same as key_type.
+ // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+ // that we know has the same equivalence classes for all lookup types.
+ template <typename LookupKey>
+ constexpr static bool can_have_multiple_equivalent_keys() {
+ return Multi ||
+ (IsTransparent<key_compare>::value &&
+ !std::is_same<LookupKey, Key>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
+ }
+
enum {
kTargetNodeSize = TargetNodeSize,
@@ -452,7 +452,7 @@ struct SearchResult {
// useful information.
template <typename V>
struct SearchResult<V, false> {
- SearchResult() {}
+ SearchResult() {}
explicit SearchResult(V value) : value(value) {}
SearchResult(V value, MatchKind /*match*/) : value(value) {}
@@ -485,22 +485,22 @@ class btree_node {
using difference_type = typename Params::difference_type;
// Btree decides whether to use linear node search as follows:
- // - If the comparator expresses a preference, use that.
- // - If the key expresses a preference, use that.
+ // - If the comparator expresses a preference, use that.
+ // - If the key expresses a preference, use that.
// - If the key is arithmetic and the comparator is std::less or
// std::greater, choose linear.
// - Otherwise, choose binary.
// TODO(ezb): Might make sense to add condition(s) based on node-size.
using use_linear_search = std::integral_constant<
bool,
- has_linear_node_search_preference<key_compare>::value
- ? prefers_linear_node_search<key_compare>::value
- : has_linear_node_search_preference<key_type>::value
- ? prefers_linear_node_search<key_type>::value
- : std::is_arithmetic<key_type>::value &&
- (std::is_same<std::less<key_type>, key_compare>::value ||
- std::is_same<std::greater<key_type>,
- key_compare>::value)>;
+ has_linear_node_search_preference<key_compare>::value
+ ? prefers_linear_node_search<key_compare>::value
+ : has_linear_node_search_preference<key_type>::value
+ ? prefers_linear_node_search<key_type>::value
+ : std::is_arithmetic<key_type>::value &&
+ (std::is_same<std::less<key_type>, key_compare>::value ||
+ std::is_same<std::greater<key_type>,
+ key_compare>::value)>;
// This class is organized by absl::container_internal::Layout as if it had
// the following structure:
@@ -517,23 +517,23 @@ class btree_node {
// // is the same as the count of values.
// field_type finish;
// // The maximum number of values the node can hold. This is an integer in
- // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
+ // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
// // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
- // // nodes (even though there are still kNodeSlots values in the node).
+ // // nodes (even though there are still kNodeSlots values in the node).
// // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
// // to free extra bits for is_root, etc.
// field_type max_count;
//
// // The array of values. The capacity is `max_count` for leaf nodes and
- // // kNodeSlots for internal nodes. Only the values in
+ // // kNodeSlots for internal nodes. Only the values in
// // [start, finish) have been initialized and are valid.
// slot_type values[max_count];
//
// // The array of child pointers. The keys in children[i] are all less
// // than key(i). The keys in children[i + 1] are all greater than key(i).
- // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
+ // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
// // internal nodes.
- // btree_node *children[kNodeSlots + 1];
+ // btree_node *children[kNodeSlots + 1];
//
// This class is only constructed by EmptyNodeType. Normally, pointers to the
// layout above are allocated, cast to btree_node*, and de-allocated within
@@ -555,62 +555,62 @@ class btree_node {
private:
using layout_type = absl::container_internal::Layout<btree_node *, field_type,
slot_type, btree_node *>;
- constexpr static size_type SizeWithNSlots(size_type n) {
+ constexpr static size_type SizeWithNSlots(size_type n) {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*slots*/ n,
+ /*slots*/ n,
/*children*/ 0)
.AllocSize();
}
// A lower bound for the overhead of fields other than values in a leaf node.
constexpr static size_type MinimumOverhead() {
- return SizeWithNSlots(1) - sizeof(value_type);
+ return SizeWithNSlots(1) - sizeof(value_type);
}
// Compute how many values we can fit onto a leaf node taking into account
// padding.
- constexpr static size_type NodeTargetSlots(const int begin, const int end) {
+ constexpr static size_type NodeTargetSlots(const int begin, const int end) {
return begin == end ? begin
- : SizeWithNSlots((begin + end) / 2 + 1) >
+ : SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
- ? NodeTargetSlots(begin, (begin + end) / 2)
- : NodeTargetSlots((begin + end) / 2 + 1, end);
+ ? NodeTargetSlots(begin, (begin + end) / 2)
+ : NodeTargetSlots((begin + end) / 2 + 1, end);
}
enum {
kTargetNodeSize = params_type::kTargetNodeSize,
- kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
+ kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
- // We need a minimum of 3 slots per internal node in order to perform
+ // We need a minimum of 3 slots per internal node in order to perform
// splitting (1 value for the two nodes involved in the split and 1 value
- // propagated to the parent as the delimiter for the split). For performance
- // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
- // of 1/3 (for a node, not a b-tree).
- kMinNodeSlots = 4,
-
- kNodeSlots =
- kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
-
+ // propagated to the parent as the delimiter for the split). For performance
+ // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+ // of 1/3 (for a node, not a b-tree).
+ kMinNodeSlots = 4,
+
+ kNodeSlots =
+ kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
+
// The node is internal (i.e. is not a leaf node) if and only if `max_count`
// has this value.
kInternalNodeMaxCount = 0,
};
- // Leaves can have less than kNodeSlots values.
- constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
+ // Leaves can have less than kNodeSlots values.
+ constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*slots*/ slot_count,
+ /*slots*/ slot_count,
/*children*/ 0);
}
constexpr static layout_type InternalLayout() {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*slots*/ kNodeSlots,
- /*children*/ kNodeSlots + 1);
+ /*slots*/ kNodeSlots,
+ /*children*/ kNodeSlots + 1);
}
- constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
- return LeafLayout(slot_count).AllocSize();
+ constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+ return LeafLayout(slot_count).AllocSize();
}
constexpr static size_type InternalSize() {
return InternalLayout().AllocSize();
@@ -667,10 +667,10 @@ class btree_node {
}
field_type max_count() const {
// Internal nodes have max_count==kInternalNodeMaxCount.
- // Leaf nodes have max_count in [1, kNodeSlots].
+ // Leaf nodes have max_count in [1, kNodeSlots].
const field_type max_count = GetField<1>()[3];
return max_count == field_type{kInternalNodeMaxCount}
- ? field_type{kNodeSlots}
+ ? field_type{kNodeSlots}
: max_count;
}
@@ -792,7 +792,7 @@ class btree_node {
SearchResult<int, true> binary_search_impl(
const K &k, int s, int e, const CompareTo &comp,
std::true_type /* IsCompareTo */) const {
- if (params_type::template can_have_multiple_equivalent_keys<K>()) {
+ if (params_type::template can_have_multiple_equivalent_keys<K>()) {
MatchKind exact_match = MatchKind::kNe;
while (s != e) {
const int mid = (s + e) >> 1;
@@ -803,14 +803,14 @@ class btree_node {
e = mid;
if (c == 0) {
// Need to return the first value whose key is not less than k,
- // which requires continuing the binary search if there could be
- // multiple equivalent keys.
+ // which requires continuing the binary search if there could be
+ // multiple equivalent keys.
exact_match = MatchKind::kEq;
}
}
}
return {s, exact_match};
- } else { // Can't have multiple equivalent keys.
+ } else { // Can't have multiple equivalent keys.
while (s != e) {
const int mid = (s + e) >> 1;
const absl::weak_ordering c = comp(key(mid), k);
@@ -860,12 +860,12 @@ class btree_node {
start_slot(), max_count * sizeof(slot_type));
}
void init_internal(btree_node *parent) {
- init_leaf(parent, kNodeSlots);
+ init_leaf(parent, kNodeSlots);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
set_max_count(kInternalNodeMaxCount);
absl::container_internal::SanitizerPoisonMemoryRegion(
- &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
+ &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
}
static void deallocate(const size_type size, btree_node *node,
@@ -943,7 +943,7 @@ struct btree_iterator {
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
using params_type = typename Node::params_type;
- using is_map_container = typename params_type::is_map_container;
+ using is_map_container = typename params_type::is_map_container;
using node_type = Node;
using normal_node = typename std::remove_const<Node>::type;
@@ -955,7 +955,7 @@ struct btree_iterator {
using slot_type = typename params_type::slot_type;
using iterator =
- btree_iterator<normal_node, normal_reference, normal_pointer>;
+ btree_iterator<normal_node, normal_reference, normal_pointer>;
using const_iterator =
btree_iterator<const_node, const_reference, const_pointer>;
@@ -972,19 +972,19 @@ struct btree_iterator {
btree_iterator(Node *n, int p) : node(n), position(p) {}
// NOTE: this SFINAE allows for implicit conversions from iterator to
- // const_iterator, but it specifically avoids hiding the copy constructor so
- // that the trivial one will be used when possible.
+ // const_iterator, but it specifically avoids hiding the copy constructor so
+ // that the trivial one will be used when possible.
template <typename N, typename R, typename P,
absl::enable_if_t<
std::is_same<btree_iterator<N, R, P>, iterator>::value &&
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
- btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
+ btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
: node(other.node), position(other.position) {}
private:
// This SFINAE allows explicit conversions from const_iterator to
- // iterator, but also avoids hiding the copy constructor.
+ // iterator, but also avoids hiding the copy constructor.
// NOTE: the const_cast is safe because this constructor is only called by
// non-const methods and the container owns the nodes.
template <typename N, typename R, typename P,
@@ -992,7 +992,7 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
std::is_same<btree_iterator, iterator>::value,
int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> other)
+ explicit btree_iterator(const btree_iterator<N, R, P> other)
: node(const_cast<node_type *>(other.node)), position(other.position) {}
// Increment/decrement the iterator.
@@ -1055,8 +1055,8 @@ struct btree_iterator {
}
private:
- friend iterator;
- friend const_iterator;
+ friend iterator;
+ friend const_iterator;
template <typename Params>
friend class btree;
template <typename Tree>
@@ -1122,8 +1122,8 @@ class btree {
}
enum : uint32_t {
- kNodeSlots = node_type::kNodeSlots,
- kMinNodeValues = kNodeSlots / 2,
+ kNodeSlots = node_type::kNodeSlots,
+ kMinNodeValues = kNodeSlots / 2,
};
struct node_stats {
@@ -1154,8 +1154,8 @@ class btree {
using const_reference = typename Params::const_reference;
using pointer = typename Params::pointer;
using const_pointer = typename Params::const_pointer;
- using iterator =
- typename btree_iterator<node_type, reference, pointer>::iterator;
+ using iterator =
+ typename btree_iterator<node_type, reference, pointer>::iterator;
using const_iterator = typename iterator::const_iterator;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
@@ -1168,46 +1168,46 @@ class btree {
private:
// For use in copy_or_move_values_in_order.
const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
- value_type &&maybe_move_from_iterator(iterator it) {
- // This is a destructive operation on the other container so it's safe for
- // us to const_cast and move from the keys here even if it's a set.
- return std::move(const_cast<value_type &>(*it));
- }
+ value_type &&maybe_move_from_iterator(iterator it) {
+ // This is a destructive operation on the other container so it's safe for
+ // us to const_cast and move from the keys here even if it's a set.
+ return std::move(const_cast<value_type &>(*it));
+ }
// Copies or moves (depending on the template parameter) the values in
// other into this btree in their order in other. This btree must be empty
// before this method is called. This method is used in copy construction,
// copy assignment, and move assignment.
template <typename Btree>
- void copy_or_move_values_in_order(Btree &other);
+ void copy_or_move_values_in_order(Btree &other);
// Validates that various assumptions/requirements are true at compile time.
constexpr static bool static_assert_validation();
public:
- btree(const key_compare &comp, const allocator_type &alloc)
- : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
-
- btree(const btree &other) : btree(other, other.allocator()) {}
- btree(const btree &other, const allocator_type &alloc)
- : btree(other.key_comp(), alloc) {
- copy_or_move_values_in_order(other);
- }
+ btree(const key_compare &comp, const allocator_type &alloc)
+ : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+
+ btree(const btree &other) : btree(other, other.allocator()) {}
+ btree(const btree &other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ copy_or_move_values_in_order(other);
+ }
btree(btree &&other) noexcept
: root_(std::move(other.root_)),
rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
size_(absl::exchange(other.size_, 0)) {
other.mutable_root() = EmptyNode();
}
- btree(btree &&other, const allocator_type &alloc)
- : btree(other.key_comp(), alloc) {
- if (alloc == other.allocator()) {
- swap(other);
- } else {
- // Move values from `other` one at a time when allocators are different.
- copy_or_move_values_in_order(other);
- }
- }
+ btree(btree &&other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ if (alloc == other.allocator()) {
+ swap(other);
+ } else {
+ // Move values from `other` one at a time when allocators are different.
+ copy_or_move_values_in_order(other);
+ }
+ }
~btree() {
// Put static_asserts in destructor to avoid triggering them before the type
@@ -1235,23 +1235,23 @@ class btree {
return const_reverse_iterator(begin());
}
- // Finds the first element whose key is not less than `key`.
+ // Finds the first element whose key is not less than `key`.
template <typename K>
iterator lower_bound(const K &key) {
- return internal_end(internal_lower_bound(key).value);
+ return internal_end(internal_lower_bound(key).value);
}
template <typename K>
const_iterator lower_bound(const K &key) const {
- return internal_end(internal_lower_bound(key).value);
+ return internal_end(internal_lower_bound(key).value);
}
- // Finds the first element whose key is not less than `key` and also returns
- // whether that element is equal to `key`.
+ // Finds the first element whose key is not less than `key` and also returns
+ // whether that element is equal to `key`.
+ template <typename K>
+ std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+ // Finds the first element whose key is greater than `key`.
template <typename K>
- std::pair<iterator, bool> lower_bound_equal(const K &key) const;
-
- // Finds the first element whose key is greater than `key`.
- template <typename K>
iterator upper_bound(const K &key) {
return internal_end(internal_upper_bound(key));
}
@@ -1332,8 +1332,8 @@ class btree {
// to the element after the last erased element.
std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
- // Finds an element with key equivalent to `key` or returns `end()` if `key`
- // is not present.
+ // Finds an element with key equivalent to `key` or returns `end()` if `key`
+ // is not present.
template <typename K>
iterator find(const K &key) {
return internal_end(internal_find(key));
@@ -1407,14 +1407,14 @@ class btree {
}
}
- // The average number of bytes used per value stored in the btree assuming
- // random insertion order.
+ // The average number of bytes used per value stored in the btree assuming
+ // random insertion order.
static double average_bytes_per_value() {
- // The expected number of values per node with random insertion order is the
- // average of the maximum and minimum numbers of values per node.
- const double expected_values_per_node =
- (kNodeSlots + kMinNodeValues) / 2.0;
- return node_type::LeafSize() / expected_values_per_node;
+ // The expected number of values per node with random insertion order is the
+ // average of the maximum and minimum numbers of values per node.
+ const double expected_values_per_node =
+ (kNodeSlots + kMinNodeValues) / 2.0;
+ return node_type::LeafSize() / expected_values_per_node;
}
// The fullness of the btree. Computed as the number of elements in the btree
@@ -1424,7 +1424,7 @@ class btree {
// Returns 0 for empty trees.
double fullness() const {
if (empty()) return 0.0;
- return static_cast<double>(size()) / (nodes() * kNodeSlots);
+ return static_cast<double>(size()) / (nodes() * kNodeSlots);
}
// The overhead of the btree structure in bytes per node. Computed as the
// total number of bytes used by the btree minus the number of bytes used for
@@ -1474,7 +1474,7 @@ class btree {
}
node_type *new_leaf_node(node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
- n->init_leaf(parent, kNodeSlots);
+ n->init_leaf(parent, kNodeSlots);
return n;
}
node_type *new_leaf_root_node(const int max_count) {
@@ -1534,8 +1534,8 @@ class btree {
// Internal routine which implements lower_bound().
template <typename K>
- SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
- const K &key) const;
+ SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+ const K &key) const;
// Internal routine which implements upper_bound().
template <typename K>
@@ -1719,7 +1719,7 @@ template <typename P>
void btree_node<P>::split(const int insert_position, btree_node *dest,
allocator_type *alloc) {
assert(dest->count() == 0);
- assert(max_count() == kNodeSlots);
+ assert(max_count() == kNodeSlots);
// We bias the split based on the position being inserted. If we're
// inserting at the beginning of the left node then bias the split to put
@@ -1727,7 +1727,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
// right node then bias the split to put more values on the left node.
if (insert_position == start()) {
dest->set_finish(dest->start() + finish() - 1);
- } else if (insert_position == kNodeSlots) {
+ } else if (insert_position == kNodeSlots) {
dest->set_finish(dest->start());
} else {
dest->set_finish(dest->start() + count() / 2);
@@ -1798,7 +1798,7 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
// Navigate to the leftmost leaf under node, and then delete upwards.
while (!node->leaf()) node = node->start_child();
- // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
+ // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
// isn't guaranteed to be a valid `field_type`.
int pos = node->position();
btree_node *parent = node->parent();
@@ -1886,7 +1886,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
// btree methods
template <typename P>
template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree &other) {
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
static_assert(std::is_same<btree, Btree>::value ||
std::is_same<const btree, Btree>::value,
"Btree type must be same or const.");
@@ -1894,11 +1894,11 @@ void btree<P>::copy_or_move_values_in_order(Btree &other) {
// We can avoid key comparisons because we know the order of the
// values is the same order we'll store them in.
- auto iter = other.begin();
- if (iter == other.end()) return;
+ auto iter = other.begin();
+ if (iter == other.end()) return;
insert_multi(maybe_move_from_iterator(iter));
++iter;
- for (; iter != other.end(); ++iter) {
+ for (; iter != other.end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1917,7 +1917,7 @@ constexpr bool btree<P>::static_assert_validation() {
// Note: We assert that kTargetValues, which is computed from
// Params::kTargetNodeSize, must fit the node_type::field_type.
static_assert(
- kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
+ kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
"target node size too large");
// Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
@@ -1937,29 +1937,29 @@ constexpr bool btree<P>::static_assert_validation() {
}
template <typename P>
-template <typename K>
-auto btree<P>::lower_bound_equal(const K &key) const
- -> std::pair<iterator, bool> {
- const SearchResult<iterator, is_key_compare_to::value> res =
- internal_lower_bound(key);
- const iterator lower = iterator(internal_end(res.value));
- const bool equal = res.HasMatch()
- ? res.IsEq()
- : lower != end() && !compare_keys(key, lower.key());
- return {lower, equal};
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+ -> std::pair<iterator, bool> {
+ const SearchResult<iterator, is_key_compare_to::value> res =
+ internal_lower_bound(key);
+ const iterator lower = iterator(internal_end(res.value));
+ const bool equal = res.HasMatch()
+ ? res.IsEq()
+ : lower != end() && !compare_keys(key, lower.key());
+ return {lower, equal};
}
template <typename P>
template <typename K>
auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
- const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
- const iterator lower = lower_and_equal.first;
- if (!lower_and_equal.second) {
- return {lower, lower};
- }
+ const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+ const iterator lower = lower_and_equal.first;
+ if (!lower_and_equal.second) {
+ return {lower, lower};
+ }
const iterator next = std::next(lower);
- if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
// The next iterator after lower must point to a key greater than `key`.
// Note: if this assert fails, then it may indicate that the comparator does
// not meet the equivalence requirements for Compare
@@ -1970,7 +1970,7 @@ auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
// Try once more to avoid the call to upper_bound() if there's only one
// equivalent key. This should prevent all calls to upper_bound() in cases of
// unique-containers with heterogeneous comparators in which all comparison
- // operators have the same equivalence classes.
+ // operators have the same equivalence classes.
if (next == end() || compare_keys(key, next.key())) return {lower, next};
// In this case, we need to call upper_bound() to avoid worst case O(N)
@@ -2101,7 +2101,7 @@ auto btree<P>::operator=(const btree &other) -> btree & {
*mutable_allocator() = other.allocator();
}
- copy_or_move_values_in_order(other);
+ copy_or_move_values_in_order(other);
}
return *this;
}
@@ -2131,7 +2131,7 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
// comparator while moving the values so we can't swap the key
// comparators.
*mutable_key_comp() = other.key_comp();
- copy_or_move_values_in_order(other);
+ copy_or_move_values_in_order(other);
}
}
}
@@ -2298,7 +2298,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
node_type *&node = iter->node;
int &insert_position = iter->position;
assert(node->count() == node->max_count());
- assert(kNodeSlots == node->max_count());
+ assert(kNodeSlots == node->max_count());
// First try to make room on the node by rebalancing.
node_type *parent = node->parent();
@@ -2306,17 +2306,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
if (node->position() > parent->start()) {
// Try rebalancing with our left sibling.
node_type *left = parent->child(node->position() - 1);
- assert(left->max_count() == kNodeSlots);
- if (left->count() < kNodeSlots) {
+ assert(left->max_count() == kNodeSlots);
+ if (left->count() < kNodeSlots) {
// We bias rebalancing based on the position being inserted. If we're
// inserting at the end of the right node then we bias rebalancing to
// fill up the left node.
- int to_move = (kNodeSlots - left->count()) /
- (1 + (insert_position < static_cast<int>(kNodeSlots)));
+ int to_move = (kNodeSlots - left->count()) /
+ (1 + (insert_position < static_cast<int>(kNodeSlots)));
to_move = (std::max)(1, to_move);
if (insert_position - to_move >= node->start() ||
- left->count() + to_move < static_cast<int>(kNodeSlots)) {
+ left->count() + to_move < static_cast<int>(kNodeSlots)) {
left->rebalance_right_to_left(to_move, node, mutable_allocator());
assert(node->max_count() - node->count() == to_move);
@@ -2335,17 +2335,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
if (node->position() < parent->finish()) {
// Try rebalancing with our right sibling.
node_type *right = parent->child(node->position() + 1);
- assert(right->max_count() == kNodeSlots);
- if (right->count() < kNodeSlots) {
+ assert(right->max_count() == kNodeSlots);
+ if (right->count() < kNodeSlots) {
// We bias rebalancing based on the position being inserted. If we're
// inserting at the beginning of the left node then we bias rebalancing
// to fill up the right node.
- int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
+ int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
(1 + (insert_position > node->start()));
to_move = (std::max)(1, to_move);
if (insert_position <= node->finish() - to_move ||
- right->count() + to_move < static_cast<int>(kNodeSlots)) {
+ right->count() + to_move < static_cast<int>(kNodeSlots)) {
node->rebalance_left_to_right(to_move, right, mutable_allocator());
if (insert_position > node->finish()) {
@@ -2361,8 +2361,8 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// Rebalancing failed, make sure there is room on the parent node for a new
// value.
- assert(parent->max_count() == kNodeSlots);
- if (parent->count() == kNodeSlots) {
+ assert(parent->max_count() == kNodeSlots);
+ if (parent->count() == kNodeSlots) {
iterator parent_iter(node->parent(), node->position());
rebalance_or_split(&parent_iter);
}
@@ -2407,8 +2407,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
if (iter->node->position() > parent->start()) {
// Try merging with our left sibling.
node_type *left = parent->child(iter->node->position() - 1);
- assert(left->max_count() == kNodeSlots);
- if (1U + left->count() + iter->node->count() <= kNodeSlots) {
+ assert(left->max_count() == kNodeSlots);
+ if (1U + left->count() + iter->node->count() <= kNodeSlots) {
iter->position += 1 + left->count();
merge_nodes(left, iter->node);
iter->node = left;
@@ -2418,8 +2418,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
if (iter->node->position() < parent->finish()) {
// Try merging with our right sibling.
node_type *right = parent->child(iter->node->position() + 1);
- assert(right->max_count() == kNodeSlots);
- if (1U + iter->node->count() + right->count() <= kNodeSlots) {
+ assert(right->max_count() == kNodeSlots);
+ if (1U + iter->node->count() + right->count() <= kNodeSlots) {
merge_nodes(iter->node, right);
return true;
}
@@ -2500,12 +2500,12 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
allocator_type *alloc = mutable_allocator();
if (iter.node->count() == max_count) {
// Make room in the leaf for the new item.
- if (max_count < kNodeSlots) {
+ if (max_count < kNodeSlots) {
// Insertion into the root where the root is smaller than the full node
// size. Simply grow the size of the root node.
assert(iter.node == root());
iter.node =
- new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
+ new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
// Transfer the values from the old root to the new root.
node_type *old_root = root();
node_type *new_root = iter.node;
@@ -2552,27 +2552,27 @@ inline auto btree<P>::internal_locate(const K &key) const
template <typename P>
template <typename K>
-auto btree<P>::internal_lower_bound(const K &key) const
- -> SearchResult<iterator, is_key_compare_to::value> {
- if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
- SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
- ret.value = internal_last(ret.value);
- return ret;
- }
+auto btree<P>::internal_lower_bound(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
+ ret.value = internal_last(ret.value);
+ return ret;
+ }
iterator iter(const_cast<node_type *>(root()));
- SearchResult<int, is_key_compare_to::value> res;
- bool seen_eq = false;
+ SearchResult<int, is_key_compare_to::value> res;
+ bool seen_eq = false;
for (;;) {
- res = iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
+ res = iter.node->lower_bound(key, key_comp());
+ iter.position = res.value;
if (iter.node->leaf()) {
break;
}
- seen_eq = seen_eq || res.IsEq();
+ seen_eq = seen_eq || res.IsEq();
iter.node = iter.node->child(iter.position);
}
- if (res.IsEq()) return {iter, MatchKind::kEq};
- return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
+ if (res.IsEq()) return {iter, MatchKind::kEq};
+ return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
}
template <typename P>
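The comment block restored in the btree.h hunks above documents the 'absl_btree_prefer_linear_node_search' opt-in protocol. As a minimal sketch of that protocol (the wrapper type IntKey and the btree_map instantiation are illustrative, not part of this patch):

#include <type_traits>
#include "absl/container/btree_map.h"

// Hypothetical key type that wraps an int and defines a cheap operator<,
// so it opts into linear node search via the tag documented above.
class IntKey {
 public:
  using absl_btree_prefer_linear_node_search = std::true_type;
  explicit IntKey(int k) : k_(k) {}

 private:
  friend bool operator<(IntKey a, IntKey b) { return a.k_ < b.k_; }
  int k_;
};

absl::btree_map<IntKey, int> demo_map;  // nodes are searched linearly
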
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
index 83c411a6e2..a99668c713 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
@@ -24,7 +24,7 @@
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
-#include "absl/memory/memory.h"
+#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -70,21 +70,21 @@ class btree_container {
explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {}
- explicit btree_container(const allocator_type &alloc)
- : tree_(key_compare(), alloc) {}
-
- btree_container(const btree_container &other)
- : btree_container(other, absl::allocator_traits<allocator_type>::
- select_on_container_copy_construction(
- other.get_allocator())) {}
- btree_container(const btree_container &other, const allocator_type &alloc)
- : tree_(other.tree_, alloc) {}
-
- btree_container(btree_container &&other) noexcept(
- std::is_nothrow_move_constructible<Tree>::value) = default;
- btree_container(btree_container &&other, const allocator_type &alloc)
- : tree_(std::move(other.tree_), alloc) {}
-
+ explicit btree_container(const allocator_type &alloc)
+ : tree_(key_compare(), alloc) {}
+
+ btree_container(const btree_container &other)
+ : btree_container(other, absl::allocator_traits<allocator_type>::
+ select_on_container_copy_construction(
+ other.get_allocator())) {}
+ btree_container(const btree_container &other, const allocator_type &alloc)
+ : tree_(other.tree_, alloc) {}
+
+ btree_container(btree_container &&other) noexcept(
+ std::is_nothrow_move_constructible<Tree>::value) = default;
+ btree_container(btree_container &&other, const allocator_type &alloc)
+ : tree_(std::move(other.tree_), alloc) {}
+
btree_container &operator=(const btree_container &other) = default;
btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default;
@@ -105,11 +105,11 @@ class btree_container {
// Lookup routines.
template <typename K = key_type>
- size_type count(const key_arg<K> &key) const {
- auto equal_range = this->equal_range(key);
- return std::distance(equal_range.first, equal_range.second);
- }
- template <typename K = key_type>
+ size_type count(const key_arg<K> &key) const {
+ auto equal_range = this->equal_range(key);
+ return std::distance(equal_range.first, equal_range.second);
+ }
+ template <typename K = key_type>
iterator find(const key_arg<K> &key) {
return tree_.find(key);
}
@@ -158,11 +158,11 @@ class btree_container {
iterator erase(const_iterator first, const_iterator last) {
return tree_.erase_range(iterator(first), iterator(last)).second;
}
- template <typename K = key_type>
- size_type erase(const key_arg<K> &key) {
- auto equal_range = this->equal_range(key);
- return tree_.erase_range(equal_range.first, equal_range.second).first;
- }
+ template <typename K = key_type>
+ size_type erase(const key_arg<K> &key) {
+ auto equal_range = this->equal_range(key);
+ return tree_.erase_range(equal_range.first, equal_range.second).first;
+ }
// Extract routines.
node_type extract(iterator position) {
@@ -259,7 +259,7 @@ class btree_set_container : public btree_container<Tree> {
using super_type::super_type;
btree_set_container() {}
- // Range constructors.
+ // Range constructors.
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -267,19 +267,19 @@ class btree_set_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
- template <class InputIterator>
- btree_set_container(InputIterator b, InputIterator e,
- const allocator_type &alloc)
- : btree_set_container(b, e, key_compare(), alloc) {}
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_set_container(b, e, key_compare(), alloc) {}
- // Initializer list constructors.
+ // Initializer list constructors.
btree_set_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_set_container(init.begin(), init.end(), comp, alloc) {}
- btree_set_container(std::initializer_list<init_type> init,
- const allocator_type &alloc)
- : btree_set_container(init.begin(), init.end(), alloc) {}
+ btree_set_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_set_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
std::pair<iterator, bool> insert(const value_type &v) {
@@ -341,10 +341,10 @@ class btree_set_container : public btree_container<Tree> {
// Node extraction routines.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
- const std::pair<iterator, bool> lower_and_equal =
- this->tree_.lower_bound_equal(key);
- return lower_and_equal.second ? extract(lower_and_equal.first)
- : node_type();
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
}
using super_type::extract;
@@ -389,7 +389,7 @@ template <typename Tree>
class btree_map_container : public btree_set_container<Tree> {
using super_type = btree_set_container<Tree>;
using params_type = typename Tree::params_type;
- friend class BtreeNodePeer;
+ friend class BtreeNodePeer;
private:
template <class K>
@@ -554,7 +554,7 @@ class btree_multiset_container : public btree_container<Tree> {
using super_type::super_type;
btree_multiset_container() {}
- // Range constructors.
+ // Range constructors.
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -562,19 +562,19 @@ class btree_multiset_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
- template <class InputIterator>
- btree_multiset_container(InputIterator b, InputIterator e,
- const allocator_type &alloc)
- : btree_multiset_container(b, e, key_compare(), alloc) {}
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_multiset_container(b, e, key_compare(), alloc) {}
- // Initializer list constructors.
+ // Initializer list constructors.
btree_multiset_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
- btree_multiset_container(std::initializer_list<init_type> init,
- const allocator_type &alloc)
- : btree_multiset_container(init.begin(), init.end(), alloc) {}
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_multiset_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
@@ -623,10 +623,10 @@ class btree_multiset_container : public btree_container<Tree> {
// Node extraction routines.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
- const std::pair<iterator, bool> lower_and_equal =
- this->tree_.lower_bound_equal(key);
- return lower_and_equal.second ? extract(lower_and_equal.first)
- : node_type();
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
}
using super_type::extract;
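The count(), erase(), and extract() overloads restored above take a deduced key_arg<K>, so containers with transparent comparators can be queried without materializing a key_type. A minimal usage sketch against the public absl::btree_set interface (demo_heterogeneous_erase is an illustrative name, not part of this patch):

#include <string>
#include "absl/container/btree_set.h"
#include "absl/strings/string_view.h"

void demo_heterogeneous_erase() {
  absl::btree_set<std::string> words = {"apple", "pear"};
  absl::string_view key = "pear";    // no temporary std::string is built
  if (words.count(key) > 0) {        // key_arg<K> overload, routed through equal_range()
    words.erase(key);                // equal_range() + erase_range(), per the hunk above
  }
}
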
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index f1c72287ef..40cce0479e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -70,7 +70,7 @@ void HashtablezInfo::PrepareForSampling() {
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
- hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+ hashes_bitwise_xor.store(0, std::memory_order_relaxed);
max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
@@ -93,9 +93,9 @@ static bool ShouldForceSampling() {
if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
if (state == kUninitialized) {
- state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
- ? kForce
- : kDontForce;
+ state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+ ? kForce
+ : kDontForce;
global_state.store(state, std::memory_order_relaxed);
}
return state == kForce;
@@ -154,7 +154,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
- info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
info->max_probe_length.store(
std::max(info->max_probe_length.load(std::memory_order_relaxed),
probe_length),
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 0064307c9a..91fcdb34a3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -79,7 +79,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
- std::atomic<size_t> hashes_bitwise_xor;
+ std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;
// All of the fields below are set by `PrepareForSampling`, they must not be
@@ -272,7 +272,7 @@ void SetHashtablezMaxSamples(int32_t max);
// initialization of static storage duration objects.
// The definition of this constant is weak, which allows us to inject a
// different value for it at link time.
-extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
+extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
index 2166c3f189..ed35a7eec3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -21,8 +21,8 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
// See hashtablez_sampler.h for details.
-extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
- AbslContainerInternalSampleEverything)() {
+extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
+ AbslContainerInternalSampleEverything)() {
return false;
}
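Because AbslContainerInternalSampleEverything() is given only a weak definition in the hunk above, a binary can force hashtablez sampling by linking in a strong definition. A minimal sketch, not part of this patch:

#include "absl/container/internal/hashtablez_sampler.h"

// A strong definition overrides the weak one in
// hashtablez_sampler_force_weak_definition.cc at link time.
extern "C" bool ABSL_INTERNAL_C_SYMBOL(
    AbslContainerInternalSampleEverything)() {
  return true;  // sample every hashtable created by this binary
}
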
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
index 34a78e0498..1d7d6cda72 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -36,13 +36,13 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inlined_vector_internal {
-// GCC does not deal very well with the below code
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic push
+// GCC does not deal very well with the below code
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
template <typename A>
using AllocatorTraits = std::allocator_traits<A>;
template <typename A>
@@ -110,7 +110,7 @@ struct Allocation {
Pointer<A> data;
SizeType<A> capacity;
};
-
+
template <typename A,
bool IsOverAligned =
(alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
@@ -119,13 +119,13 @@ struct MallocAdapter {
return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
requested_capacity};
}
-
+
static void Deallocate(A& allocator, Pointer<A> pointer,
SizeType<A> capacity) {
AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
}
};
-
+
template <typename A, typename ValueAdapter>
void ConstructElements(NoTypeDeduction<A>& allocator,
Pointer<A> construct_first, ValueAdapter& values,
@@ -303,14 +303,14 @@ class Storage {
: metadata_(allocator, /* size and is_allocated */ 0) {}
~Storage() {
- if (GetSizeAndIsAllocated() == 0) {
- // Empty and not allocated; nothing to do.
+ if (GetSizeAndIsAllocated() == 0) {
+ // Empty and not allocated; nothing to do.
} else if (IsMemcpyOk<A>::value) {
- // No destructors need to be run; just deallocate if necessary.
- DeallocateIfAllocated();
- } else {
- DestroyContents();
- }
+ // No destructors need to be run; just deallocate if necessary.
+ DeallocateIfAllocated();
+ } else {
+ DestroyContents();
+ }
}
// ---------------------------------------------------------------------------
@@ -364,8 +364,8 @@ class Storage {
// Storage Member Mutators
// ---------------------------------------------------------------------------
- ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
-
+ ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
+
template <typename ValueAdapter>
void Initialize(ValueAdapter values, SizeType<A> new_size);
@@ -441,8 +441,8 @@ class Storage {
}
private:
- ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
-
+ ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
+
using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
struct Allocated {
@@ -459,51 +459,51 @@ class Storage {
Inlined inlined;
};
- template <typename... Args>
+ template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
-
+
Metadata metadata_;
Data data_;
};
template <typename T, size_t N, typename A>
-void Storage<T, N, A>::DestroyContents() {
+void Storage<T, N, A>::DestroyContents() {
Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
DestroyElements<A>(GetAllocator(), data, GetSize());
- DeallocateIfAllocated();
-}
-
-template <typename T, size_t N, typename A>
-void Storage<T, N, A>::InitFrom(const Storage& other) {
+ DeallocateIfAllocated();
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::InitFrom(const Storage& other) {
const SizeType<A> n = other.GetSize();
- assert(n > 0); // Empty sources handled handled in caller.
+ assert(n > 0); // Empty sources handled handled in caller.
ConstPointer<A> src;
Pointer<A> dst;
- if (!other.GetIsAllocated()) {
- dst = GetInlinedData();
- src = other.GetInlinedData();
- } else {
- // Because this is only called from the `InlinedVector` constructors, it's
- // safe to take on the allocation with size `0`. If `ConstructElements(...)`
- // throws, deallocation will be automatically handled by `~Storage()`.
+ if (!other.GetIsAllocated()) {
+ dst = GetInlinedData();
+ src = other.GetInlinedData();
+ } else {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+ // throws, deallocation will be automatically handled by `~Storage()`.
SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
Allocation<A> allocation =
MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
SetAllocation(allocation);
dst = allocation.data;
- src = other.GetAllocatedData();
- }
+ src = other.GetAllocatedData();
+ }
if (IsMemcpyOk<A>::value) {
std::memcpy(reinterpret_cast<char*>(dst),
reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
- } else {
+ } else {
auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
ConstructElements<A>(GetAllocator(), dst, values, n);
- }
- GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
-}
-
-template <typename T, size_t N, typename A>
+ }
+ GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
@@ -585,20 +585,20 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
Pointer<A> const base = storage_view.data;
const SizeType<A> size = storage_view.size;
A& alloc = GetAllocator();
- if (new_size <= size) {
- // Destroy extra old elements.
+ if (new_size <= size) {
+ // Destroy extra old elements.
DestroyElements<A>(alloc, base + new_size, size - new_size);
- } else if (new_size <= storage_view.capacity) {
- // Construct new elements in place.
+ } else if (new_size <= storage_view.capacity) {
+ // Construct new elements in place.
ConstructElements<A>(alloc, base + size, values, new_size - size);
- } else {
- // Steps:
- // a. Allocate new backing store.
- // b. Construct new elements in new backing store.
- // c. Move existing elements from old backing store to now.
- // d. Destroy all elements in old backing store.
- // Use transactional wrappers for the first two steps so we can roll
- // back if necessary due to exceptions.
+ } else {
+ // Steps:
+ // a. Allocate new backing store.
+ // b. Construct new elements in new backing store.
+ //  c. Move existing elements from old backing store to new backing store.
+ // d. Destroy all elements in old backing store.
+ // Use transactional wrappers for the first two steps so we can roll
+ // back if necessary due to exceptions.
AllocationTransaction<A> allocation_tx(alloc);
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
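The three `Resize` branches above map directly onto observable behavior of the public `absl::InlinedVector` wrapper built on this `Storage`; a small usage sketch (sizes and fill values are arbitrary):

#include <cassert>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 8> v(6, 1);  // size 6, inline capacity 8
  v.resize(3);     // new_size <= size: destroys the three tail elements
  assert(v.size() == 3);
  v.resize(8, 2);  // fits in the current capacity: constructs in place
  assert(v.size() == 8);
  v.resize(20, 3); // exceeds capacity: allocates a new backing store
  assert(v.size() == 20 && v.capacity() >= 20);
  return 0;
}
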
@@ -717,20 +717,20 @@ template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
const SizeType<A> n = storage_view.size;
- if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
- // Fast path; new element fits.
+ if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+ // Fast path; new element fits.
Pointer<A> last_ptr = storage_view.data + n;
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
- AddSize(1);
- return *last_ptr;
- }
- // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
- return EmplaceBackSlow(std::forward<Args>(args)...);
-}
-
-template <typename T, size_t N, typename A>
-template <typename... Args>
+ AddSize(1);
+ return *last_ptr;
+ }
+ // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+ return EmplaceBackSlow(std::forward<Args>(args)...);
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
AllocationTransaction<A> allocation_tx(GetAllocator());
@@ -740,24 +740,24 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
Pointer<A> last_ptr = construct_data + storage_view.size;
- // Construct new element.
+ // Construct new element.
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
- // Move elements from old backing store to new backing store.
- ABSL_INTERNAL_TRY {
+ // Move elements from old backing store to new backing store.
+ ABSL_INTERNAL_TRY {
ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
storage_view.size);
}
- ABSL_INTERNAL_CATCH_ANY {
+ ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
- ABSL_INTERNAL_RETHROW;
- }
- // Destroy elements in old backing store.
+ ABSL_INTERNAL_RETHROW;
+ }
+ // Destroy elements in old backing store.
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- DeallocateIfAllocated();
+ DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
- SetIsAllocated();
+ SetIsAllocated();
AddSize(1);
return *last_ptr;
}
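To relate the fast path in `EmplaceBack` and the allocating slow path in `EmplaceBackSlow` to user-visible behavior, here is a sketch using the public `absl::InlinedVector` wrapper; the inline capacity of 4 is an arbitrary choice:

#include <cassert>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v;
  for (int i = 0; i < 4; ++i) v.push_back(i);  // fast path: element fits
  assert(v.capacity() == 4);                   // still using inline storage
  v.push_back(4);  // slow path: allocate, move, destroy old elements
  assert(v.size() == 5 && v.capacity() > 4);
  return 0;
}
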
@@ -921,10 +921,10 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
}
// End ignore "array-bounds" and "maybe-uninitialized"
-#if !defined(__clang__) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
} // namespace inlined_vector_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
index 23d44d7793..a59a243059 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/layout.h
@@ -404,7 +404,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
ElementAlignment<N>::value);
}
@@ -597,7 +597,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
- SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
+ SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
}
// If built with --config=asan, poisons padding bytes (if any) in the
@@ -621,7 +621,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
@@ -645,7 +645,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// produce "unsigned*" where another produces "unsigned int *".
std::string DebugString() const {
const auto offsets = Offsets();
- const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
+ const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
const std::string types[] = {
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
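The offset arithmetic touched above is what drives the usage pattern documented at the top of layout.h; a condensed sketch of that pattern (array sizes 3 and 5 are arbitrary example values):

#include <cstdint>
#include <memory>
#include "absl/container/internal/layout.h"

int main() {
  using absl::container_internal::Layout;
  // One allocation holding 3 int32_t followed by 5 double, with padding so
  // that each array is suitably aligned.
  Layout<int32_t, double> layout(3, 5);
  std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
  int32_t* ints = layout.Pointer<0>(p.get());
  double* dbls = layout.Pointer<1>(p.get());
  ints[0] = 42;
  dbls[4] = 3.5;
  return 0;
}
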
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
index eea9f6ee4e..687bcb8a4d 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -33,7 +33,7 @@ constexpr size_t Group::kWidth;
// Returns "random" seed.
inline size_t RandomSeed() {
-#ifdef ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
@@ -51,17 +51,17 @@ bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == ctrl_t::kSentinel);
- assert(IsValidCapacity(capacity));
+ assert(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
- Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
- }
- // Copy the cloned ctrl bytes.
+ Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ }
+ // Copy the cloned ctrl bytes.
std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
-}
+}
 // Extern template instantiation for an inline function.
template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
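As a reference for the rewrite above, the per-byte transformation that `ConvertDeletedToEmptyAndFullToDeleted` applies can be sketched on its own; the constants mirror the `ctrl_t` encoding used by raw_hash_set, and `ConvertOne` is a hypothetical name:

#include <cstdint>

// ctrl byte encoding used by raw_hash_set: full slots store a 7-bit hash
// (0..127); the special states are negative.
constexpr int8_t kEmpty = -128;
constexpr int8_t kDeleted = -2;
constexpr int8_t kSentinel = -1;

// Per-byte version of the loop above: full -> deleted, special -> empty.
constexpr int8_t ConvertOne(int8_t c) { return c >= 0 ? kDeleted : kEmpty; }

static_assert(ConvertOne(kSentinel) == kEmpty, "special bytes become empty");
static_assert(ConvertOne(0x4a) == kDeleted, "full bytes become deleted");

int main() { return 0; }
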
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
index 24cf740296..12682b3532 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -125,7 +125,7 @@
#include "absl/container/internal/have_sse.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
-#include "absl/numeric/bits.h"
+#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
namespace absl {
@@ -199,9 +199,9 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
}
template <typename T>
-uint32_t TrailingZeros(T x) {
- ABSL_INTERNAL_ASSUME(x != 0);
- return countr_zero(x);
+uint32_t TrailingZeros(T x) {
+ ABSL_INTERNAL_ASSUME(x != 0);
+ return countr_zero(x);
}
// An abstraction over a bitmask. It provides an easy way to iterate through the
@@ -231,24 +231,24 @@ class BitMask {
}
explicit operator bool() const { return mask_ != 0; }
int operator*() const { return LowestBitSet(); }
- uint32_t LowestBitSet() const {
+ uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- uint32_t HighestBitSet() const {
- return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
- uint32_t TrailingZeros() const {
+ uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- uint32_t LeadingZeros() const {
+ uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return countl_zero(mask_ << extra_bits) >> Shift;
+ return countl_zero(mask_ << extra_bits) >> Shift;
}
private:
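The BitMask helpers above reduce to the absl::countr_zero / countl_zero / bit_width calls that this patch pulls in from absl/numeric/bits.h; a stand-alone sketch of the iteration pattern (the mask value is arbitrary):

#include <cstdint>
#include <cstdio>
#include "absl/numeric/bits.h"

int main() {
  // Iterate over set bits from lowest to highest, the way BitMask's iterator
  // visits matching slots within a group.
  uint32_t mask = 0x26;  // bits 1, 2 and 5 set
  while (mask != 0) {
    const int slot = absl::countr_zero(mask);  // LowestBitSet()
    std::printf("candidate slot %d\n", slot);
    mask &= mask - 1;  // drop the lowest set bit, like BitMask::operator++
  }
  return 0;
}
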
@@ -384,8 +384,8 @@ struct GroupSse2Impl {
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
- return TrailingZeros(static_cast<uint32_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -480,23 +480,23 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// DELETED -> EMPTY
// EMPTY -> EMPTY
// FULL -> DELETED
-void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) {
- return n ? ~size_t{} >> countl_zero(n) : 1;
+ return n ? ~size_t{} >> countl_zero(n) : 1;
}
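A worked example of the one-liner above: capacities are always of the form 2^k - 1, and absl::countl_zero comes from the absl/numeric/bits.h include added by this patch:

#include <cassert>
#include <cstddef>
#include "absl/numeric/bits.h"

// Stand-alone copy of the formula above.
inline size_t NormalizeCapacitySketch(size_t n) {
  return n ? ~size_t{} >> absl::countl_zero(n) : 1;
}

int main() {
  assert(NormalizeCapacitySketch(0) == 1);
  assert(NormalizeCapacitySketch(5) == 7);    // next 2^k - 1 >= 5
  assert(NormalizeCapacitySketch(8) == 15);
  assert(NormalizeCapacitySketch(15) == 15);  // already a valid capacity
  return 0;
}
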
-// General notes on capacity/growth methods below:
-// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
-// average of two empty slots per group.
-// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
-// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
-// never need to probe (the whole table fits in one group) so we don't need a
-// load factor less than 1.
-
-// Given `capacity` of the table, returns the size (i.e. number of full slots)
-// at which we should grow the capacity.
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+// average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+// never need to probe (the whole table fits in one group) so we don't need a
+// load factor less than 1.
+
+// Given `capacity` of the table, returns the size (i.e. number of full slots)
+// at which we should grow the capacity.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -507,7 +507,7 @@ inline size_t CapacityToGrowth(size_t capacity) {
return capacity - capacity / 8;
}
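The growth arithmetic described in the comment block above can be checked with a small sketch; the Group::kWidth == 8 special cases handled by the real header are deliberately omitted, and the second helper mirrors the GrowthToLowerboundCapacity function that follows:

#include <cassert>
#include <cstddef>
#include <cstdint>

inline size_t CapacityToGrowthSketch(size_t capacity) {
  return capacity - capacity / 8;  // roughly capacity * 7/8
}

inline size_t GrowthToLowerboundCapacitySketch(size_t growth) {
  // roughly growth * 8/7
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}

int main() {
  // reserve(100): lower-bound capacity 114, normalized by the table to 127,
  // which then supports 127 - 127/8 = 112 >= 100 elements before growing.
  assert(GrowthToLowerboundCapacitySketch(100) == 114);
  assert(CapacityToGrowthSketch(127) == 112);
  return 0;
}
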
// From desired "growth" to a lowerbound of the necessary capacity.
-// The result might not be a valid capacity and requires NormalizeCapacity().
+// The result might not be a valid capacity and requires NormalizeCapacity().
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -545,66 +545,66 @@ inline void AssertIsValid(ctrl_t* ctrl) {
"been erased, or the table might have rehashed.");
}
-struct FindInfo {
- size_t offset;
- size_t probe_length;
-};
-
-// The representation of the object has two modes:
-// - small: For capacities < kWidth-1
-// - large: For the rest.
-//
-// Differences:
-// - In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
-//
-// - In small mode only the first `capacity()` control bytes after the
+struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+};
+
+// The representation of the object has two modes:
+// - small: For capacities < kWidth-1
+// - large: For the rest.
+//
+// Differences:
+// - In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// - In small mode only the first `capacity()` control bytes after the
// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account in
-// find_first_non_full(), where we never try ShouldInsertBackwards() for
-// small tables.
-inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-
+// represent a real slot. This is important to take into account in
+// find_first_non_full(), where we never try ShouldInsertBackwards() for
+// small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
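With the 16-wide SSE2 groups, only the valid capacities 1, 3 and 7 count as small; a compile-time sketch with kWidth fixed at 16 as an assumption:

#include <cstddef>

constexpr size_t kWidthSketch = 16;  // Group::kWidth on SSE2 builds
constexpr bool IsSmallSketch(size_t capacity) {
  return capacity < kWidthSketch - 1;
}

static_assert(IsSmallSketch(1) && IsSmallSketch(7), "fits in one group");
static_assert(!IsSmallSketch(15), "15 slots already use the large layout");

int main() { return 0; }
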
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
- size_t capacity) {
- return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
-}
-
-// Probes the raw_hash_set with the probe sequence for hash and returns the
-// pointer to the first empty or deleted slot.
+ size_t capacity) {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
// NOTE: this function must work with tables having both ctrl_t::kEmpty and
 // ctrl_t::kDeleted in one group. Such tables appear during
// drop_deletes_without_resize.
-//
-// This function is very useful when insertions happen and:
-// - the input is already a set
-// - there are enough slots
-// - the element with the hash is not in the table
+//
+// This function is very useful when insertions happen and:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
- size_t capacity) {
- auto seq = probe(ctrl, hash, capacity);
- while (true) {
- Group g{ctrl + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
- if (mask) {
-#if !defined(NDEBUG)
- // We want to add entropy even when ASLR is not enabled.
- // In debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
- return {seq.offset(mask.HighestBitSet()), seq.index()};
- }
-#endif
- return {seq.offset(mask.LowestBitSet()), seq.index()};
- }
- seq.next();
+ size_t capacity) {
+ auto seq = probe(ctrl, hash, capacity);
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug build we will randomly insert in either the front or back of
+ // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
assert(seq.index() <= capacity && "full table!");
- }
-}
-
+ }
+}
+
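The probe()/find_first_non_full() pair above walks the table in group-sized, triangularly increasing steps; a sketch of that sequence for one hash (the capacity, hash and group width are example values):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t kWidth = 16;       // Group::kWidth on SSE2 builds
  const size_t capacity = 63;     // a valid capacity: 2^k - 1, so also a mask
  size_t offset = 25 & capacity;  // H1(hash) & mask; 25 stands in for a hash
  size_t index = 0;
  for (int step = 0; step < 4; ++step) {
    std::printf("probe group starting at slot %zu\n", offset);
    index += kWidth;                      // stride grows by one group per step
    offset = (offset + index) & capacity;
  }
  return 0;
}
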
 // Extern template for an inline function keeps the possibility of inlining.
 // When the compiler decides not to inline, no symbols will be added to the
 // corresponding translation unit.
@@ -872,8 +872,8 @@ class raw_hash_set {
explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : ctrl_(EmptyGroup()),
- settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
+ : ctrl_(EmptyGroup()),
+ settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
initialize_slots();
@@ -982,11 +982,11 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
sizeof(slot_type));
emplace_at(target.offset, v);
- infoz().RecordInsert(hash, target.probe_length);
+ infoz().RecordInsert(hash, target.probe_length);
}
size_ = that.size();
growth_left() -= that.size();
@@ -1003,24 +1003,24 @@ class raw_hash_set {
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(absl::exchange(that.growth_left(), 0),
- absl::exchange(that.infoz(), HashtablezInfoHandle()),
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+ settings_(absl::exchange(that.growth_left(), 0),
+ absl::exchange(that.infoz(), HashtablezInfoHandle()),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()),
slots_(nullptr),
size_(0),
capacity_(0),
- settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
- a) {
+ settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
+ a) {
if (a == that.alloc_ref()) {
std::swap(ctrl_, that.ctrl_);
std::swap(slots_, that.slots_);
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
- std::swap(infoz(), that.infoz());
+ std::swap(infoz(), that.infoz());
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -1093,7 +1093,7 @@ class raw_hash_set {
reset_growth_left();
}
assert(empty());
- infoz().RecordStorageChanged(0, capacity_);
+ infoz().RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1166,7 +1166,7 @@ class raw_hash_set {
template <class InputIt>
void insert(InputIt first, InputIt last) {
- for (; first != last; ++first) emplace(*first);
+ for (; first != last; ++first) emplace(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1193,9 +1193,9 @@ class raw_hash_set {
}
iterator insert(const_iterator, node_type&& node) {
- auto res = insert(std::move(node));
- node = std::move(res.node);
- return res.position;
+ auto res = insert(std::move(node));
+ node = std::move(res.node);
+ return res.position;
}
// This overload kicks in if we can deduce the key from args. This enables us
@@ -1385,7 +1385,7 @@ class raw_hash_set {
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- swap(infoz(), that.infoz());
+ swap(infoz(), that.infoz());
SwapAlloc(alloc_ref(), that.alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
}
@@ -1394,7 +1394,7 @@ class raw_hash_set {
if (n == 0 && capacity_ == 0) return;
if (n == 0 && size_ == 0) {
destroy_slots();
- infoz().RecordStorageChanged(0, 0);
+ infoz().RecordStorageChanged(0, 0);
infoz().RecordClearedReservation();
return;
}
@@ -1412,16 +1412,16 @@ class raw_hash_set {
}
}
- void reserve(size_t n) {
+ void reserve(size_t n) {
if (n > size() + growth_left()) {
size_t m = GrowthToLowerboundCapacity(n);
- resize(NormalizeCapacity(m));
+ resize(NormalizeCapacity(m));
// This is after resize, to ensure that we have completed the allocation
// and have potentially sampled the hashtable.
infoz().RecordReservation(n);
- }
- }
+ }
+ }
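Through the public containers built on raw_hash_set, the effect of the reserve() path above is that n subsequent insertions cannot trigger a rehash; a sketch assuming absl::flat_hash_set:

#include <cassert>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> s;
  s.reserve(100);  // GrowthToLowerboundCapacity + NormalizeCapacity + resize
  const auto buckets = s.bucket_count();
  for (int i = 0; i < 100; ++i) s.insert(i);
  assert(s.bucket_count() == buckets);  // no rehash occurred
  return 0;
}
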
// Extension API: support for heterogeneous keys.
//
@@ -1447,7 +1447,7 @@ class raw_hash_set {
(void)key;
#if defined(__GNUC__)
prefetch_heap_block();
- auto seq = probe(ctrl_, hash_ref()(key), capacity_);
+ auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__
@@ -1462,7 +1462,7 @@ class raw_hash_set {
// called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1626,7 +1626,7 @@ class raw_hash_set {
SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
capacity_, ctrl_, slots_, sizeof(slot_type));
growth_left() += was_never_full;
- infoz().RecordErase();
+ infoz().RecordErase();
}
void initialize_slots() {
@@ -1654,7 +1654,7 @@ class raw_hash_set {
mem + SlotOffset(capacity_, alignof(slot_type)));
ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
- infoz().RecordStorageChanged(size_, capacity_);
+ infoz().RecordStorageChanged(size_, capacity_);
}
void destroy_slots() {
@@ -1690,7 +1690,7 @@ class raw_hash_set {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
@@ -1704,12 +1704,12 @@ class raw_hash_set {
&alloc_ref(), old_ctrl,
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
- infoz().RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
- assert(!is_small(capacity_));
+ assert(!is_small(capacity_));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1770,7 +1770,7 @@ class raw_hash_set {
}
}
reset_growth_left();
- infoz().RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
}
void rehash_and_grow_if_necessary() {
@@ -1829,7 +1829,7 @@ class raw_hash_set {
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1861,7 +1861,7 @@ class raw_hash_set {
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
auto hash = hash_ref()(key);
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1878,17 +1878,17 @@ class raw_hash_set {
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
!IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
- target = find_first_non_full(ctrl_, hash, capacity_);
+ target = find_first_non_full(ctrl_, hash, capacity_);
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
sizeof(slot_type));
- infoz().RecordInsert(hash, target.probe_length);
+ infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
@@ -1931,15 +1931,15 @@ class raw_hash_set {
#endif // __GNUC__
}
- HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
+ HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
- hasher& hash_ref() { return settings_.template get<2>(); }
- const hasher& hash_ref() const { return settings_.template get<2>(); }
- key_equal& eq_ref() { return settings_.template get<3>(); }
- const key_equal& eq_ref() const { return settings_.template get<3>(); }
- allocator_type& alloc_ref() { return settings_.template get<4>(); }
+ hasher& hash_ref() { return settings_.template get<2>(); }
+ const hasher& hash_ref() const { return settings_.template get<2>(); }
+ key_equal& eq_ref() { return settings_.template get<3>(); }
+ const key_equal& eq_ref() const { return settings_.template get<3>(); }
+ allocator_type& alloc_ref() { return settings_.template get<4>(); }
const allocator_type& alloc_ref() const {
- return settings_.template get<4>();
+ return settings_.template get<4>();
}
// TODO(alkis): Investigate removing some of these fields:
@@ -1949,11 +1949,11 @@ class raw_hash_set {
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
- absl::container_internal::CompressedTuple<size_t /* growth_left */,
- HashtablezInfoHandle, hasher,
+ absl::container_internal::CompressedTuple<size_t /* growth_left */,
+ HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
- allocator_type{}};
+ settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
@@ -1978,7 +1978,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
- auto seq = probe(set.ctrl_, hash, set.capacity_);
+ auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) {
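The raw_hash_set internals above surface through absl::flat_hash_set / flat_hash_map, including the predicate-based erase helper described near the end of the header; a usage sketch assuming the public absl::erase_if overload for flat_hash_set:

#include <cassert>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> s = {1, 2, 3, 4, 5, 6};
  absl::erase_if(s, [](int v) { return v % 2 == 0; });  // drop even values
  assert(s.size() == 3);
  return 0;
}
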
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
index 28951c5549..3fe7e7b5c0 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
@@ -1,29 +1,29 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
contrib/restricted/abseil-cpp/absl/debugging
contrib/restricted/abseil-cpp/absl/debugging/stacktrace
contrib/restricted/abseil-cpp/absl/debugging/symbolize
contrib/restricted/abseil-cpp/absl/demangle
contrib/restricted/abseil-cpp/absl/hash
- contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/numeric
contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
- contrib/restricted/abseil-cpp/absl/strings
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
contrib/restricted/abseil-cpp/absl/synchronization
contrib/restricted/abseil-cpp/absl/synchronization/internal
@@ -31,25 +31,25 @@ PEERDIR(
contrib/restricted/abseil-cpp/absl/time/civil_time
contrib/restricted/abseil-cpp/absl/time/time_zone
contrib/restricted/abseil-cpp/absl/types
- contrib/restricted/abseil-cpp/absl/types/bad_optional_access
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- -DNOMINMAX
-)
-
+ contrib/restricted/abseil-cpp/absl/types/bad_optional_access
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
-
-SRCS(
+
+SRCS(
raw_hash_set.cc
-)
-
-END()
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
index f1f7369ff3..c1d20f3c52 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -16,7 +16,7 @@
#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
#include <algorithm>
-#include <unordered_map>
+#include <unordered_map>
#include <vector>
#include "gmock/gmock.h"