path: root/contrib/restricted/abseil-cpp/absl/container/internal
author    thegeorg <thegeorg@yandex-team.ru>            2022-02-10 16:45:08 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:08 +0300
commit    4e839db24a3bbc9f1c610c43d6faaaa99824dcca (patch)
tree      506dac10f5df94fab310584ee51b24fc5a081c22 /contrib/restricted/abseil-cpp/absl/container/internal
parent    2d37894b1b037cf24231090eda8589bbb44fb6fc (diff)
download  ydb-4e839db24a3bbc9f1c610c43d6faaaa99824dcca.tar.gz
Restoring authorship annotation for <thegeorg@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/container/internal')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make   |  30
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/btree.h                           |  74
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h                 |  14
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h          |  34
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h          |  42
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc             |  36
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h              |  74
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h                  | 790
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h                    |   6
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc                   |  28
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h                    | 502
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make              |  32
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h  |  42
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h    |  72
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h    |  66
15 files changed, 921 insertions(+), 921 deletions(-)
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make b/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
index 1933289a6d..8dedf52f5a 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler/ya.make
@@ -10,24 +10,24 @@ LICENSE(Apache-2.0)
PEERDIR(
contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
contrib/restricted/abseil-cpp/absl/base/log_severity
- contrib/restricted/abseil-cpp/absl/debugging
- contrib/restricted/abseil-cpp/absl/debugging/stacktrace
- contrib/restricted/abseil-cpp/absl/debugging/symbolize
- contrib/restricted/abseil-cpp/absl/demangle
+ contrib/restricted/abseil-cpp/absl/debugging
+ contrib/restricted/abseil-cpp/absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp/absl/debugging/symbolize
+ contrib/restricted/abseil-cpp/absl/demangle
contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
contrib/restricted/abseil-cpp/absl/strings
- contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
- contrib/restricted/abseil-cpp/absl/synchronization
- contrib/restricted/abseil-cpp/absl/synchronization/internal
- contrib/restricted/abseil-cpp/absl/time
- contrib/restricted/abseil-cpp/absl/time/civil_time
- contrib/restricted/abseil-cpp/absl/time/time_zone
+ contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp/absl/synchronization
+ contrib/restricted/abseil-cpp/absl/synchronization/internal
+ contrib/restricted/abseil-cpp/absl/time
+ contrib/restricted/abseil-cpp/absl/time/civil_time
+ contrib/restricted/abseil-cpp/absl/time/time_zone
)
ADDINCL(
@@ -42,11 +42,11 @@ CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
+SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
SRCS(
- hashtablez_sampler.cc
- hashtablez_sampler_force_weak_definition.cc
+ hashtablez_sampler.cc
+ hashtablez_sampler_force_weak_definition.cc
)
END()
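
The ya.make file above uses Yandex's declarative build DSL: PEERDIR declares library dependencies, ADDINCL adds include paths, SRCDIR sets the source root, and SRCS lists the translation units to compile. A minimal sketch of such a target follows (hedged: the module macro and defaults depend on the ymake toolchain; by convention the module is named after its directory):

LIBRARY()  # module conventionally named after its directory

LICENSE(Apache-2.0)

PEERDIR(
    contrib/restricted/abseil-cpp/absl/base
)

SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)

SRCS(
    hashtablez_sampler.cc
)

END()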
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
index f636c5fc73..ec2a5622d8 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree.h
@@ -88,13 +88,13 @@ struct StringBtreeDefaultLess {
// Compatibility constructor.
StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT
- StringBtreeDefaultLess(std::less<absl::string_view>) {} // NOLINT
-
- // Allow converting to std::less for use in key_comp()/value_comp().
- explicit operator std::less<std::string>() const { return {}; }
- explicit operator std::less<absl::string_view>() const { return {}; }
- explicit operator std::less<absl::Cord>() const { return {}; }
+ StringBtreeDefaultLess(std::less<absl::string_view>) {} // NOLINT
+ // Allow converting to std::less for use in key_comp()/value_comp().
+ explicit operator std::less<std::string>() const { return {}; }
+ explicit operator std::less<absl::string_view>() const { return {}; }
+ explicit operator std::less<absl::Cord>() const { return {}; }
+
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
@@ -120,13 +120,13 @@ struct StringBtreeDefaultGreater {
StringBtreeDefaultGreater() = default;
StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT
- StringBtreeDefaultGreater(std::greater<absl::string_view>) {} // NOLINT
-
- // Allow converting to std::greater for use in key_comp()/value_comp().
- explicit operator std::greater<std::string>() const { return {}; }
- explicit operator std::greater<absl::string_view>() const { return {}; }
- explicit operator std::greater<absl::Cord>() const { return {}; }
+ StringBtreeDefaultGreater(std::greater<absl::string_view>) {} // NOLINT
+ // Allow converting to std::greater for use in key_comp()/value_comp().
+ explicit operator std::greater<std::string>() const { return {}; }
+ explicit operator std::greater<absl::string_view>() const { return {}; }
+ explicit operator std::greater<absl::Cord>() const { return {}; }
+
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
@@ -227,8 +227,8 @@ struct prefers_linear_node_search<
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy>
struct common_params {
- using original_key_compare = Compare;
-
+ using original_key_compare = Compare;
+
// If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
using key_compare = typename key_compare_to_adapter<Compare>::type;
@@ -329,21 +329,21 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
using value_type = typename super_type::value_type;
using init_type = typename super_type::init_type;
- using original_key_compare = typename super_type::original_key_compare;
- // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare
- class value_compare {
- template <typename Params>
- friend class btree;
-
- protected:
- explicit value_compare(original_key_compare c) : comp(std::move(c)) {}
-
- original_key_compare comp; // NOLINT
-
- public:
- auto operator()(const value_type &lhs, const value_type &rhs) const
- -> decltype(comp(lhs.first, rhs.first)) {
- return comp(lhs.first, rhs.first);
+ using original_key_compare = typename super_type::original_key_compare;
+ // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare
+ class value_compare {
+ template <typename Params>
+ friend class btree;
+
+ protected:
+ explicit value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+ original_key_compare comp; // NOLINT
+
+ public:
+ auto operator()(const value_type &lhs, const value_type &rhs) const
+ -> decltype(comp(lhs.first, rhs.first)) {
+ return comp(lhs.first, rhs.first);
}
};
using is_map_container = std::true_type;
@@ -409,8 +409,8 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
set_slot_policy<Key>> {
using value_type = Key;
using slot_type = typename set_params::common_params::slot_type;
- using value_compare =
- typename set_params::common_params::original_key_compare;
+ using value_compare =
+ typename set_params::common_params::original_key_compare;
using is_map_container = std::false_type;
template <typename V>
@@ -502,8 +502,8 @@ class btree_node {
std::is_same<std::greater<key_type>,
key_compare>::value)>;
- // This class is organized by absl::container_internal::Layout as if it had
- // the following structure:
+ // This class is organized by absl::container_internal::Layout as if it had
+ // the following structure:
// // A pointer to the node's parent.
// btree_node *parent;
//
@@ -1147,7 +1147,7 @@ class btree {
using size_type = typename Params::size_type;
using difference_type = typename Params::difference_type;
using key_compare = typename Params::key_compare;
- using original_key_compare = typename Params::original_key_compare;
+ using original_key_compare = typename Params::original_key_compare;
using value_compare = typename Params::value_compare;
using allocator_type = typename Params::allocator_type;
using reference = typename Params::reference;
@@ -1357,9 +1357,9 @@ class btree {
return compare_internal::compare_result_as_less_than(key_comp()(a, b));
}
- value_compare value_comp() const {
- return value_compare(original_key_compare(key_comp()));
- }
+ value_compare value_comp() const {
+ return value_compare(original_key_compare(key_comp()));
+ }
// Verifies the structure of the btree.
void verify() const;
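
The comparator machinery in this hunk is what gives absl's b-tree containers heterogeneous string lookup: StringBtreeDefaultLess/Greater compare absl::string_view directly and convert back to std::less/std::greater for key_comp()/value_comp(). A minimal sketch of the user-visible effect, assuming the public absl/container/btree_map.h header:

#include <string>
#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"

int main() {
  absl::btree_map<std::string, int> m;
  m["key"] = 1;
  // Heterogeneous lookup: find() accepts a string_view probe directly,
  // so no temporary std::string is constructed for the comparison.
  absl::string_view probe = "key";
  auto it = m.find(probe);
  return it != m.end() ? 0 : 1;
}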
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
index a99668c713..c569ab1489 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/btree_container.h
@@ -20,7 +20,7 @@
#include <iterator>
#include <utility>
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
@@ -52,7 +52,7 @@ class btree_container {
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
using difference_type = typename Tree::difference_type;
- using key_compare = typename Tree::original_key_compare;
+ using key_compare = typename Tree::original_key_compare;
using value_compare = typename Tree::value_compare;
using allocator_type = typename Tree::allocator_type;
using reference = typename Tree::reference;
@@ -177,7 +177,7 @@ class btree_container {
}
// Utility routines.
- ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
void swap(btree_container &other) { tree_.swap(other.tree_); }
void verify() const { tree_.verify(); }
@@ -215,7 +215,7 @@ class btree_container {
allocator_type get_allocator() const { return tree_.get_allocator(); }
// The key comparator used by the btree.
- key_compare key_comp() const { return key_compare(tree_.key_comp()); }
+ key_compare key_comp() const { return key_compare(tree_.key_comp()); }
value_compare value_comp() const { return tree_.value_comp(); }
// Support absl::Hash.
@@ -248,7 +248,7 @@ class btree_set_container : public btree_container<Tree> {
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
- using key_compare = typename Tree::original_key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -399,7 +399,7 @@ class btree_map_container : public btree_set_container<Tree> {
using key_type = typename Tree::key_type;
using mapped_type = typename params_type::mapped_type;
using value_type = typename Tree::value_type;
- using key_compare = typename Tree::original_key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -544,7 +544,7 @@ class btree_multiset_container : public btree_container<Tree> {
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
- using key_compare = typename Tree::original_key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
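
Switching key_compare to Tree::original_key_compare keeps the user-visible comparator type stable: internally the tree runs a key-compare-to adapter, but key_comp() hands back the comparator the user actually named. A sketch of that guarantee (the static_assert reflects the intended behavior after this change):

#include <functional>
#include <string>
#include <type_traits>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<std::string> s;
  // key_comp() returns the original user-supplied comparator type, not
  // the internal compare-to adapter used for three-way comparisons.
  auto cmp = s.key_comp();
  static_assert(std::is_same<decltype(cmp), std::less<std::string>>::value,
                "user-visible comparator type is preserved");
  return cmp("a", "b") ? 0 : 1;
}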
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
index 250e662c9d..d3c650fb97 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -78,26 +78,26 @@ struct StringHash {
}
};
-struct StringEq {
- using is_transparent = void;
- bool operator()(absl::string_view lhs, absl::string_view rhs) const {
- return lhs == rhs;
- }
- bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
- return lhs == rhs;
- }
- bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
- return lhs == rhs;
- }
- bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
- return lhs == rhs;
- }
-};
-
+struct StringEq {
+ using is_transparent = void;
+ bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+};
+
// Supports heterogeneous lookup for string-like elements.
struct StringHashEq {
using Hash = StringHash;
- using Eq = StringEq;
+ using Eq = StringEq;
};
template <>
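
StringEq's is_transparent marker is what lets the default hash tables accept absl::string_view and absl::Cord probes against std::string keys without converting. For example (a sketch using the public flat_hash_set header):

#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"

int main() {
  absl::flat_hash_set<std::string> names = {"alice", "bob"};
  // The default transparent hash/eq let contains() take a string_view
  // without materializing a temporary std::string.
  absl::string_view probe = "alice";
  return names.contains(probe) ? 0 : 1;
}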
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
index f1f555a5c1..0fd1d161a0 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_generator_testing.h
@@ -21,13 +21,13 @@
#include <stdint.h>
#include <algorithm>
-#include <cassert>
+#include <cassert>
#include <iosfwd>
#include <random>
#include <tuple>
#include <type_traits>
#include <utility>
-#include <vector>
+#include <vector>
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/memory/memory.h"
@@ -155,25 +155,25 @@ using GeneratedType = decltype(
typename Container::value_type,
typename Container::key_type>::type>&>()());
-// Naive wrapper that performs a linear search of previous values.
-// Beware this is O(SQR), which is reasonable for smaller kMaxValues.
-template <class T, size_t kMaxValues = 64, class E = void>
-struct UniqueGenerator {
- Generator<T, E> gen;
- std::vector<T> values;
-
- T operator()() {
- assert(values.size() < kMaxValues);
- for (;;) {
- T value = gen();
- if (std::find(values.begin(), values.end(), value) == values.end()) {
- values.push_back(value);
- return value;
- }
- }
- }
-};
-
+// Naive wrapper that performs a linear search of previous values.
+// Beware this is O(SQR), which is reasonable for smaller kMaxValues.
+template <class T, size_t kMaxValues = 64, class E = void>
+struct UniqueGenerator {
+ Generator<T, E> gen;
+ std::vector<T> values;
+
+ T operator()() {
+ assert(values.size() < kMaxValues);
+ for (;;) {
+ T value = gen();
+ if (std::find(values.begin(), values.end(), value) == values.end()) {
+ values.push_back(value);
+ return value;
+ }
+ }
+ }
+};
+
} // namespace hash_internal
} // namespace container_internal
ABSL_NAMESPACE_END
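
UniqueGenerator above rejects duplicates by rescanning everything it has already produced, which is why its comment flags the quadratic cost as acceptable only for small kMaxValues. The same rejection pattern in standalone form (a sketch, not the test helper itself):

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <vector>

// Draws ints until one is new; the linear scan of `seen` makes a full
// sequence O(n^2), acceptable only under a small cap (cf. kMaxValues).
int NextUnique(std::vector<int>& seen, int cap) {
  assert(static_cast<int>(seen.size()) < cap);
  for (;;) {
    int v = std::rand() % 1000;
    if (std::find(seen.begin(), seen.end(), v) == seen.end()) {
      seen.push_back(v);
      return v;
    }
  }
}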
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index 40cce0479e..d283d98d9e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -24,8 +24,8 @@
#include "absl/container/internal/have_sse.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
-#include "absl/profiling/internal/exponential_biased.h"
-#include "absl/profiling/internal/sample_recorder.h"
+#include "absl/profiling/internal/exponential_biased.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
namespace absl {
@@ -40,7 +40,7 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
+ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
@@ -50,14 +50,14 @@ ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-HashtablezSampler& GlobalHashtablezSampler() {
+HashtablezSampler& GlobalHashtablezSampler() {
static auto* sampler = new HashtablezSampler();
return *sampler;
}
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
+// TODO(bradleybear): The comments at this constructors declaration say that the
+// fields are not initialized, but this definition does initialize the fields.
+// Something needs to be cleaned up.
HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
HashtablezInfo::~HashtablezInfo() = default;
@@ -71,7 +71,7 @@ void HashtablezInfo::PrepareForSampling() {
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
hashes_bitwise_xor.store(0, std::memory_order_relaxed);
- max_reserve.store(0, std::memory_order_relaxed);
+ max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
// The inliner makes hardcoded skip_count difficult (especially when combined
@@ -101,12 +101,12 @@ static bool ShouldForceSampling() {
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
*next_sample = 1;
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
- return result;
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -128,17 +128,17 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
// that case.
if (first) {
if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
- return SampleSlow(next_sample, inline_element_size);
+ return SampleSlow(next_sample, inline_element_size);
}
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
- return result;
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
#endif
}
void UnsampleSlow(HashtablezInfo* info) {
- GlobalHashtablezSampler().Unregister(info);
+ GlobalHashtablezSampler().Unregister(info);
}
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -178,7 +178,7 @@ void SetHashtablezSampleParameter(int32_t rate) {
void SetHashtablezMaxSamples(int32_t max) {
if (max > 0) {
- GlobalHashtablezSampler().SetMaxSamples(max);
+ GlobalHashtablezSampler().SetMaxSamples(max);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
static_cast<long long>(max)); // NOLINT(runtime/int)
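
SampleSlow implements countdown sampling: each thread keeps a next_sample counter, the hot path is a single decrement, and only when the counter runs out does the code draw a new geometrically distributed gap and register a HashtablezInfo. A reduced sketch of that scheme, where std::geometric_distribution stands in for absl's ExponentialBiased generator (an assumption for illustration, not the real implementation):

#include <cstdint>
#include <random>

thread_local int64_t next_sample = 0;

// Returns true roughly once per `mean` calls; the common case is just a
// decrement-and-branch, mirroring the fast path that guards SampleSlow.
bool ShouldSample(int64_t mean) {
  if (--next_sample > 0) return false;  // fast path
  thread_local std::mt19937_64 rng{std::random_device{}()};
  std::geometric_distribution<int64_t> gap(1.0 / static_cast<double>(mean));
  next_sample = gap(rng) + 1;  // gap until the next sampled call
  return true;
}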
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 91fcdb34a3..7919c05079 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -47,7 +47,7 @@
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/have_sse.h"
-#include "absl/profiling/internal/sample_recorder.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h"
@@ -58,7 +58,7 @@ namespace container_internal {
// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
-struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
@@ -80,7 +80,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
std::atomic<size_t> hashes_bitwise_xor;
- std::atomic<size_t> max_reserve;
+ std::atomic<size_t> max_reserve;
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
@@ -91,7 +91,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
- size_t inline_element_size;
+ size_t inline_element_size;
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
@@ -109,18 +109,18 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
std::memory_order_relaxed);
}
-inline void RecordReservationSlow(HashtablezInfo* info,
- size_t target_capacity) {
- info->max_reserve.store(
- (std::max)(info->max_reserve.load(std::memory_order_relaxed),
- target_capacity),
- std::memory_order_relaxed);
-}
-
-inline void RecordClearedReservationSlow(HashtablezInfo* info) {
- info->max_reserve.store(0, std::memory_order_relaxed);
-}
-
+inline void RecordReservationSlow(HashtablezInfo* info,
+ size_t target_capacity) {
+ info->max_reserve.store(
+ (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+ target_capacity),
+ std::memory_order_relaxed);
+}
+
+inline void RecordClearedReservationSlow(HashtablezInfo* info) {
+ info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
size_t capacity) {
info->size.store(size, std::memory_order_relaxed);
@@ -144,7 +144,7 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -184,16 +184,16 @@ class HashtablezInfoHandle {
RecordRehashSlow(info_, total_probe_length);
}
- inline void RecordReservation(size_t target_capacity) {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordReservationSlow(info_, target_capacity);
- }
-
- inline void RecordClearedReservation() {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordClearedReservationSlow(info_);
- }
-
+ inline void RecordReservation(size_t target_capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordReservationSlow(info_, target_capacity);
+ }
+
+ inline void RecordClearedReservation() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordClearedReservationSlow(info_);
+ }
+
inline void RecordInsert(size_t hash, size_t distance_from_desired) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordInsertSlow(info_, hash, distance_from_desired);
@@ -223,8 +223,8 @@ class HashtablezInfoHandle {
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
- inline void RecordReservation(size_t /*target_capacity*/) {}
- inline void RecordClearedReservation() {}
+ inline void RecordReservation(size_t /*target_capacity*/) {}
+ inline void RecordClearedReservation() {}
inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
inline void RecordErase() {}
@@ -239,24 +239,24 @@ extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
// Returns an RAII sampling handle that manages registration and unregistation
// with the global sampler.
-inline HashtablezInfoHandle Sample(
- size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
+inline HashtablezInfoHandle Sample(
+ size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
- return HashtablezInfoHandle(
- SampleSlow(&global_next_sample, inline_element_size));
+ return HashtablezInfoHandle(
+ SampleSlow(&global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
}
-using HashtablezSampler =
- ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
+using HashtablezSampler =
+ ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
-// Returns a global Sampler.
-HashtablezSampler& GlobalHashtablezSampler();
+// Returns a global Sampler.
+HashtablezSampler& GlobalHashtablezSampler();
// Enables or disables sampling for Swiss tables.
void SetHashtablezEnabled(bool enabled);
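
HashtablezInfoHandle acts as a null object: unsampled tables carry a null info pointer, so every Record* call reduces to one well-predicted branch before the early return. A reduced sketch of the same idea (names here are illustrative, not the absl API):

#include <cstddef>

struct Info { size_t size = 0; };

class Handle {
 public:
  explicit Handle(Info* info) : info_(info) {}
  // No-op for unsampled tables: the common path is a single branch.
  void RecordSize(size_t size) {
    if (info_ == nullptr) return;
    info_->size = size;  // slow path, taken only for sampled tables
  }
 private:
  Info* info_;
};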
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
index 1d7d6cda72..f43d5e500d 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -21,11 +21,11 @@
#include <iterator>
#include <limits>
#include <memory>
-#include <new>
-#include <type_traits>
+#include <new>
+#include <type_traits>
#include <utility>
-#include "absl/base/attributes.h"
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -39,132 +39,132 @@ namespace inlined_vector_internal {
// GCC does not deal very well with the below code
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Warray-bounds"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
-template <typename A>
-using AllocatorTraits = std::allocator_traits<A>;
-template <typename A>
-using ValueType = typename AllocatorTraits<A>::value_type;
-template <typename A>
-using SizeType = typename AllocatorTraits<A>::size_type;
-template <typename A>
-using Pointer = typename AllocatorTraits<A>::pointer;
-template <typename A>
-using ConstPointer = typename AllocatorTraits<A>::const_pointer;
-template <typename A>
-using SizeType = typename AllocatorTraits<A>::size_type;
-template <typename A>
-using DifferenceType = typename AllocatorTraits<A>::difference_type;
-template <typename A>
-using Reference = ValueType<A>&;
-template <typename A>
-using ConstReference = const ValueType<A>&;
-template <typename A>
-using Iterator = Pointer<A>;
-template <typename A>
-using ConstIterator = ConstPointer<A>;
-template <typename A>
-using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
-template <typename A>
-using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
-template <typename A>
-using MoveIterator = typename std::move_iterator<Iterator<A>>;
-
+template <typename A>
+using AllocatorTraits = std::allocator_traits<A>;
+template <typename A>
+using ValueType = typename AllocatorTraits<A>::value_type;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using Pointer = typename AllocatorTraits<A>::pointer;
+template <typename A>
+using ConstPointer = typename AllocatorTraits<A>::const_pointer;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using DifferenceType = typename AllocatorTraits<A>::difference_type;
+template <typename A>
+using Reference = ValueType<A>&;
+template <typename A>
+using ConstReference = const ValueType<A>&;
+template <typename A>
+using Iterator = Pointer<A>;
+template <typename A>
+using ConstIterator = ConstPointer<A>;
+template <typename A>
+using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
+template <typename A>
+using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
+template <typename A>
+using MoveIterator = typename std::move_iterator<Iterator<A>>;
+
template <typename Iterator>
using IsAtLeastForwardIterator = std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>;
-template <typename A>
+template <typename A>
using IsMemcpyOk =
- absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
- absl::is_trivially_copy_constructible<ValueType<A>>,
- absl::is_trivially_copy_assignable<ValueType<A>>,
- absl::is_trivially_destructible<ValueType<A>>>;
-
-template <typename T>
-struct TypeIdentity {
- using type = T;
-};
-
-// Used for function arguments in template functions to prevent ADL by forcing
-// callers to explicitly specify the template parameter.
-template <typename T>
-using NoTypeDeduction = typename TypeIdentity<T>::type;
-
-template <typename A>
-void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
- SizeType<A> destroy_size) {
+ absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
+ absl::is_trivially_copy_constructible<ValueType<A>>,
+ absl::is_trivially_copy_assignable<ValueType<A>>,
+ absl::is_trivially_destructible<ValueType<A>>>;
+
+template <typename T>
+struct TypeIdentity {
+ using type = T;
+};
+
+// Used for function arguments in template functions to prevent ADL by forcing
+// callers to explicitly specify the template parameter.
+template <typename T>
+using NoTypeDeduction = typename TypeIdentity<T>::type;
+
+template <typename A>
+void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
if (destroy_first != nullptr) {
- for (SizeType<A> i = destroy_size; i != 0;) {
+ for (SizeType<A> i = destroy_size; i != 0;) {
--i;
- AllocatorTraits<A>::destroy(allocator, destroy_first + i);
+ AllocatorTraits<A>::destroy(allocator, destroy_first + i);
}
}
}
-template <typename A>
-struct Allocation {
- Pointer<A> data;
- SizeType<A> capacity;
-};
-
-template <typename A,
- bool IsOverAligned =
- (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
-struct MallocAdapter {
- static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
- return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
- requested_capacity};
- }
-
- static void Deallocate(A& allocator, Pointer<A> pointer,
- SizeType<A> capacity) {
- AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
- }
-};
-
-template <typename A, typename ValueAdapter>
-void ConstructElements(NoTypeDeduction<A>& allocator,
- Pointer<A> construct_first, ValueAdapter& values,
- SizeType<A> construct_size) {
- for (SizeType<A> i = 0; i < construct_size; ++i) {
- ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
+template <typename A>
+struct Allocation {
+ Pointer<A> data;
+ SizeType<A> capacity;
+};
+
+template <typename A,
+ bool IsOverAligned =
+ (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+ static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+ return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+ requested_capacity};
+ }
+
+ static void Deallocate(A& allocator, Pointer<A> pointer,
+ SizeType<A> capacity) {
+ AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+ }
+};
+
+template <typename A, typename ValueAdapter>
+void ConstructElements(NoTypeDeduction<A>& allocator,
+ Pointer<A> construct_first, ValueAdapter& values,
+ SizeType<A> construct_size) {
+ for (SizeType<A> i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
ABSL_INTERNAL_CATCH_ANY {
- DestroyElements<A>(allocator, construct_first, i);
+ DestroyElements<A>(allocator, construct_first, i);
ABSL_INTERNAL_RETHROW;
}
}
}
-template <typename A, typename ValueAdapter>
-void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
- SizeType<A> assign_size) {
- for (SizeType<A> i = 0; i < assign_size; ++i) {
- values.AssignNext(assign_first + i);
+template <typename A, typename ValueAdapter>
+void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
+ SizeType<A> assign_size) {
+ for (SizeType<A> i = 0; i < assign_size; ++i) {
+ values.AssignNext(assign_first + i);
}
}
-template <typename A>
+template <typename A>
struct StorageView {
- Pointer<A> data;
- SizeType<A> size;
- SizeType<A> capacity;
+ Pointer<A> data;
+ SizeType<A> size;
+ SizeType<A> capacity;
};
-template <typename A, typename Iterator>
+template <typename A, typename Iterator>
class IteratorValueAdapter {
public:
explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
- void ConstructNext(A& allocator, Pointer<A> construct_at) {
- AllocatorTraits<A>::construct(allocator, construct_at, *it_);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *it_);
++it_;
}
- void AssignNext(Pointer<A> assign_at) {
+ void AssignNext(Pointer<A> assign_at) {
*assign_at = *it_;
++it_;
}
@@ -173,123 +173,123 @@ class IteratorValueAdapter {
Iterator it_;
};
-template <typename A>
+template <typename A>
class CopyValueAdapter {
public:
- explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
+ explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
- void ConstructNext(A& allocator, Pointer<A> construct_at) {
- AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
}
- void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
+ void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
private:
- ConstPointer<A> ptr_;
+ ConstPointer<A> ptr_;
};
-template <typename A>
+template <typename A>
class DefaultValueAdapter {
public:
explicit DefaultValueAdapter() {}
- void ConstructNext(A& allocator, Pointer<A> construct_at) {
- AllocatorTraits<A>::construct(allocator, construct_at);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at);
}
- void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
+ void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
};
-template <typename A>
+template <typename A>
class AllocationTransaction {
public:
- explicit AllocationTransaction(A& allocator)
- : allocator_data_(allocator, nullptr), capacity_(0) {}
+ explicit AllocationTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), capacity_(0) {}
~AllocationTransaction() {
if (DidAllocate()) {
- MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
}
}
AllocationTransaction(const AllocationTransaction&) = delete;
void operator=(const AllocationTransaction&) = delete;
- A& GetAllocator() { return allocator_data_.template get<0>(); }
- Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
- SizeType<A>& GetCapacity() { return capacity_; }
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetCapacity() { return capacity_; }
bool DidAllocate() { return GetData() != nullptr; }
-
- Pointer<A> Allocate(SizeType<A> requested_capacity) {
- Allocation<A> result =
- MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
- GetData() = result.data;
- GetCapacity() = result.capacity;
- return result.data;
- }
-
- ABSL_MUST_USE_RESULT Allocation<A> Release() && {
- Allocation<A> result = {GetData(), GetCapacity()};
- Reset();
- return result;
- }
-
- private:
+
+ Pointer<A> Allocate(SizeType<A> requested_capacity) {
+ Allocation<A> result =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ GetData() = result.data;
+ GetCapacity() = result.capacity;
+ return result.data;
+ }
+
+ ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+ Allocation<A> result = {GetData(), GetCapacity()};
+ Reset();
+ return result;
+ }
+
+ private:
void Reset() {
GetData() = nullptr;
GetCapacity() = 0;
}
- container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
- SizeType<A> capacity_;
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> capacity_;
};
-template <typename A>
+template <typename A>
class ConstructionTransaction {
public:
- explicit ConstructionTransaction(A& allocator)
- : allocator_data_(allocator, nullptr), size_(0) {}
+ explicit ConstructionTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), size_(0) {}
~ConstructionTransaction() {
if (DidConstruct()) {
- DestroyElements<A>(GetAllocator(), GetData(), GetSize());
+ DestroyElements<A>(GetAllocator(), GetData(), GetSize());
}
}
ConstructionTransaction(const ConstructionTransaction&) = delete;
void operator=(const ConstructionTransaction&) = delete;
- A& GetAllocator() { return allocator_data_.template get<0>(); }
- Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
- SizeType<A>& GetSize() { return size_; }
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetSize() { return size_; }
bool DidConstruct() { return GetData() != nullptr; }
template <typename ValueAdapter>
- void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
- ConstructElements<A>(GetAllocator(), data, values, size);
+ void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
+ ConstructElements<A>(GetAllocator(), data, values, size);
GetData() = data;
GetSize() = size;
}
- void Commit() && {
+ void Commit() && {
GetData() = nullptr;
GetSize() = 0;
}
private:
- container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
- SizeType<A> size_;
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> size_;
};
template <typename T, size_t N, typename A>
class Storage {
public:
- static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
+ static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
return current_capacity * 2;
}
- static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
- SizeType<A> requested_capacity) {
+ static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
+ SizeType<A> requested_capacity) {
return (std::max)(NextCapacity(current_capacity), requested_capacity);
}
@@ -297,15 +297,15 @@ class Storage {
// Storage Constructors and Destructor
// ---------------------------------------------------------------------------
- Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
+ Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
- explicit Storage(const A& allocator)
- : metadata_(allocator, /* size and is_allocated */ 0) {}
+ explicit Storage(const A& allocator)
+ : metadata_(allocator, /* size and is_allocated */ 0) {}
~Storage() {
if (GetSizeAndIsAllocated() == 0) {
// Empty and not allocated; nothing to do.
- } else if (IsMemcpyOk<A>::value) {
+ } else if (IsMemcpyOk<A>::value) {
// No destructors need to be run; just deallocate if necessary.
DeallocateIfAllocated();
} else {
@@ -317,48 +317,48 @@ class Storage {
// Storage Member Accessors
// ---------------------------------------------------------------------------
- SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+ SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
- const SizeType<A>& GetSizeAndIsAllocated() const {
+ const SizeType<A>& GetSizeAndIsAllocated() const {
return metadata_.template get<1>();
}
- SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+ SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
- Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
+ Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
- ConstPointer<A> GetAllocatedData() const {
+ ConstPointer<A> GetAllocatedData() const {
return data_.allocated.allocated_data;
}
- Pointer<A> GetInlinedData() {
- return reinterpret_cast<Pointer<A>>(
+ Pointer<A> GetInlinedData() {
+ return reinterpret_cast<Pointer<A>>(
std::addressof(data_.inlined.inlined_data[0]));
}
- ConstPointer<A> GetInlinedData() const {
- return reinterpret_cast<ConstPointer<A>>(
+ ConstPointer<A> GetInlinedData() const {
+ return reinterpret_cast<ConstPointer<A>>(
std::addressof(data_.inlined.inlined_data[0]));
}
- SizeType<A> GetAllocatedCapacity() const {
+ SizeType<A> GetAllocatedCapacity() const {
return data_.allocated.allocated_capacity;
}
- SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
+ SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
- StorageView<A> MakeStorageView() {
- return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
- GetAllocatedCapacity()}
- : StorageView<A>{GetInlinedData(), GetSize(),
- GetInlinedCapacity()};
+ StorageView<A> MakeStorageView() {
+ return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView<A>{GetInlinedData(), GetSize(),
+ GetInlinedCapacity()};
}
- A& GetAllocator() { return metadata_.template get<0>(); }
+ A& GetAllocator() { return metadata_.template get<0>(); }
- const A& GetAllocator() const { return metadata_.template get<0>(); }
+ const A& GetAllocator() const { return metadata_.template get<0>(); }
// ---------------------------------------------------------------------------
// Storage Member Mutators
@@ -367,67 +367,67 @@ class Storage {
ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
template <typename ValueAdapter>
- void Initialize(ValueAdapter values, SizeType<A> new_size);
+ void Initialize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- void Assign(ValueAdapter values, SizeType<A> new_size);
+ void Assign(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- void Resize(ValueAdapter values, SizeType<A> new_size);
+ void Resize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
- SizeType<A> insert_count);
+ Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count);
template <typename... Args>
- Reference<A> EmplaceBack(Args&&... args);
+ Reference<A> EmplaceBack(Args&&... args);
- Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
+ Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
- void Reserve(SizeType<A> requested_capacity);
+ void Reserve(SizeType<A> requested_capacity);
void ShrinkToFit();
void Swap(Storage* other_storage_ptr);
void SetIsAllocated() {
- GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
+ GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
}
void UnsetIsAllocated() {
- GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
}
- void SetSize(SizeType<A> size) {
+ void SetSize(SizeType<A> size) {
GetSizeAndIsAllocated() =
- (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
+ (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
}
- void SetAllocatedSize(SizeType<A> size) {
- GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
+ void SetAllocatedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
}
- void SetInlinedSize(SizeType<A> size) {
- GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
+ void SetInlinedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
}
- void AddSize(SizeType<A> count) {
- GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
+ void AddSize(SizeType<A> count) {
+ GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
}
- void SubtractSize(SizeType<A> count) {
+ void SubtractSize(SizeType<A> count) {
assert(count <= GetSize());
- GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
+ GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
- void SetAllocation(Allocation<A> allocation) {
- data_.allocated.allocated_data = allocation.data;
- data_.allocated.allocated_capacity = allocation.capacity;
+ void SetAllocation(Allocation<A> allocation) {
+ data_.allocated.allocated_data = allocation.data;
+ data_.allocated.allocated_capacity = allocation.capacity;
}
void MemcpyFrom(const Storage& other_storage) {
- assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
+ assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
@@ -435,23 +435,23 @@ class Storage {
void DeallocateIfAllocated() {
if (GetIsAllocated()) {
- MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
- GetAllocatedCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+ GetAllocatedCapacity());
}
}
private:
ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
- using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
+ using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
struct Allocated {
- Pointer<A> allocated_data;
- SizeType<A> allocated_capacity;
+ Pointer<A> allocated_data;
+ SizeType<A> allocated_capacity;
};
struct Inlined {
- alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
+ alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
};
union Data {
@@ -460,7 +460,7 @@ class Storage {
};
template <typename... Args>
- ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
+ ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
Metadata metadata_;
Data data_;
@@ -468,17 +468,17 @@ class Storage {
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
- Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
- DestroyElements<A>(GetAllocator(), data, GetSize());
+ Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ DestroyElements<A>(GetAllocator(), data, GetSize());
DeallocateIfAllocated();
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
- const SizeType<A> n = other.GetSize();
+ const SizeType<A> n = other.GetSize();
assert(n > 0); // Empty sources handled handled in caller.
- ConstPointer<A> src;
- Pointer<A> dst;
+ ConstPointer<A> src;
+ Pointer<A> dst;
if (!other.GetIsAllocated()) {
dst = GetInlinedData();
src = other.GetInlinedData();
@@ -486,48 +486,48 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
- Allocation<A> allocation =
- MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
- SetAllocation(allocation);
- dst = allocation.data;
+ SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ SetAllocation(allocation);
+ dst = allocation.data;
src = other.GetAllocatedData();
}
- if (IsMemcpyOk<A>::value) {
- std::memcpy(reinterpret_cast<char*>(dst),
- reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
+ if (IsMemcpyOk<A>::value) {
+ std::memcpy(reinterpret_cast<char*>(dst),
+ reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
} else {
- auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
- ConstructElements<A>(GetAllocator(), dst, values, n);
+ auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
+ ConstructElements<A>(GetAllocator(), dst, values, n);
}
GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
+auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
// Only callable from constructors!
assert(!GetIsAllocated());
assert(GetSize() == 0);
- Pointer<A> construct_data;
+ Pointer<A> construct_data;
if (new_size > GetInlinedCapacity()) {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- SizeType<A> requested_capacity =
- ComputeCapacity(GetInlinedCapacity(), new_size);
- Allocation<A> allocation =
- MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
- construct_data = allocation.data;
- SetAllocation(allocation);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(GetInlinedCapacity(), new_size);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ construct_data = allocation.data;
+ SetAllocation(allocation);
SetIsAllocated();
} else {
construct_data = GetInlinedData();
}
- ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
+ ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
// Since the initial size was guaranteed to be `0` and the allocated bit is
// already correct for either case, *adding* `new_size` gives us the correct
@@ -537,20 +537,20 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
- -> void {
- StorageView<A> storage_view = MakeStorageView();
+auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
- AllocationTransaction<A> allocation_tx(GetAllocator());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- absl::Span<ValueType<A>> assign_loop;
- absl::Span<ValueType<A>> construct_loop;
- absl::Span<ValueType<A>> destroy_loop;
+ absl::Span<ValueType<A>> assign_loop;
+ absl::Span<ValueType<A>> construct_loop;
+ absl::Span<ValueType<A>> destroy_loop;
if (new_size > storage_view.capacity) {
- SizeType<A> requested_capacity =
- ComputeCapacity(storage_view.capacity, new_size);
- construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
assign_loop = {storage_view.data, storage_view.size};
@@ -561,16 +561,16 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
}
- AssignElements<A>(assign_loop.data(), values, assign_loop.size());
+ AssignElements<A>(assign_loop.data(), values, assign_loop.size());
- ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
- construct_loop.size());
+ ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
+ construct_loop.size());
- DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
+ DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
- SetAllocation(std::move(allocation_tx).Release());
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -579,18 +579,18 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
- -> void {
- StorageView<A> storage_view = MakeStorageView();
- Pointer<A> const base = storage_view.data;
- const SizeType<A> size = storage_view.size;
- A& alloc = GetAllocator();
+auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
+ Pointer<A> const base = storage_view.data;
+ const SizeType<A> size = storage_view.size;
+ A& alloc = GetAllocator();
if (new_size <= size) {
// Destroy extra old elements.
- DestroyElements<A>(alloc, base + new_size, size - new_size);
+ DestroyElements<A>(alloc, base + new_size, size - new_size);
} else if (new_size <= storage_view.capacity) {
// Construct new elements in place.
- ConstructElements<A>(alloc, base + size, values, new_size - size);
+ ConstructElements<A>(alloc, base + size, values, new_size - size);
} else {
// Steps:
// a. Allocate new backing store.
@@ -599,22 +599,22 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
// d. Destroy all elements in old backing store.
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
- AllocationTransaction<A> allocation_tx(alloc);
- SizeType<A> requested_capacity =
- ComputeCapacity(storage_view.capacity, new_size);
- Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
+ AllocationTransaction<A> allocation_tx(alloc);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
- ConstructionTransaction<A> construction_tx(alloc);
- construction_tx.Construct(new_data + size, values, new_size - size);
+ ConstructionTransaction<A> construction_tx(alloc);
+ construction_tx.Construct(new_data + size, values, new_size - size);
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- (MoveIterator<A>(base)));
- ConstructElements<A>(alloc, new_data, move_values, size);
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ (MoveIterator<A>(base)));
+ ConstructElements<A>(alloc, new_data, move_values, size);
- DestroyElements<A>(alloc, base, size);
- std::move(construction_tx).Commit();
+ DestroyElements<A>(alloc, base, size);
+ std::move(construction_tx).Commit();
DeallocateIfAllocated();
- SetAllocation(std::move(allocation_tx).Release());
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
SetSize(new_size);
@@ -622,76 +622,76 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
- SizeType<A> insert_count) -> Iterator<A> {
- StorageView<A> storage_view = MakeStorageView();
+auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count) -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
- SizeType<A> insert_index =
- std::distance(ConstIterator<A>(storage_view.data), pos);
- SizeType<A> insert_end_index = insert_index + insert_count;
- SizeType<A> new_size = storage_view.size + insert_count;
+ SizeType<A> insert_index =
+ std::distance(ConstIterator<A>(storage_view.data), pos);
+ SizeType<A> insert_end_index = insert_index + insert_count;
+ SizeType<A> new_size = storage_view.size + insert_count;
if (new_size > storage_view.capacity) {
- AllocationTransaction<A> allocation_tx(GetAllocator());
- ConstructionTransaction<A> construction_tx(GetAllocator());
- ConstructionTransaction<A> move_construction_tx(GetAllocator());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ ConstructionTransaction<A> construction_tx(GetAllocator());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- SizeType<A> requested_capacity =
- ComputeCapacity(storage_view.capacity, new_size);
- Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
- construction_tx.Construct(new_data + insert_index, values, insert_count);
+ construction_tx.Construct(new_data + insert_index, values, insert_count);
- move_construction_tx.Construct(new_data, move_values, insert_index);
+ move_construction_tx.Construct(new_data, move_values, insert_index);
- ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
- move_values, storage_view.size - insert_index);
+ ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
+ move_values, storage_view.size - insert_index);
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- std::move(construction_tx).Commit();
- std::move(move_construction_tx).Commit();
+ std::move(construction_tx).Commit();
+ std::move(move_construction_tx).Commit();
DeallocateIfAllocated();
- SetAllocation(std::move(allocation_tx).Release());
+ SetAllocation(std::move(allocation_tx).Release());
SetAllocatedSize(new_size);
- return Iterator<A>(new_data + insert_index);
+ return Iterator<A>(new_data + insert_index);
} else {
- SizeType<A> move_construction_destination_index =
+ SizeType<A> move_construction_destination_index =
(std::max)(insert_end_index, storage_view.size);
- ConstructionTransaction<A> move_construction_tx(GetAllocator());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
- IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
- MoveIterator<A>(storage_view.data +
- (move_construction_destination_index - insert_count)));
- absl::Span<ValueType<A>> move_construction = {
+ IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
+ MoveIterator<A>(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ absl::Span<ValueType<A>> move_construction = {
storage_view.data + move_construction_destination_index,
new_size - move_construction_destination_index};
- Pointer<A> move_assignment_values = storage_view.data + insert_index;
- absl::Span<ValueType<A>> move_assignment = {
+ Pointer<A> move_assignment_values = storage_view.data + insert_index;
+ absl::Span<ValueType<A>> move_assignment = {
storage_view.data + insert_end_index,
move_construction_destination_index - insert_end_index};
- absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
- move_construction.size()};
+ absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
+ move_construction.size()};
- absl::Span<ValueType<A>> insert_construction = {
+ absl::Span<ValueType<A>> insert_construction = {
insert_assignment.data() + insert_assignment.size(),
insert_count - insert_assignment.size()};
move_construction_tx.Construct(move_construction.data(),
- move_construction_values,
+ move_construction_values,
move_construction.size());
- for (Pointer<A>
- destination = move_assignment.data() + move_assignment.size(),
- last_destination = move_assignment.data(),
- source = move_assignment_values + move_assignment.size();
+ for (Pointer<A>
+ destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
;) {
--destination;
--source;
@@ -699,29 +699,29 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
*destination = std::move(*source);
}
- AssignElements<A>(insert_assignment.data(), values,
- insert_assignment.size());
+ AssignElements<A>(insert_assignment.data(), values,
+ insert_assignment.size());
- ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
- insert_construction.size());
+ ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
+ insert_construction.size());
- std::move(move_construction_tx).Commit();
+ std::move(move_construction_tx).Commit();
AddSize(insert_count);
- return Iterator<A>(storage_view.data + insert_index);
+ return Iterator<A>(storage_view.data + insert_index);
}
}
template <typename T, size_t N, typename A>
template <typename... Args>
-auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
- StorageView<A> storage_view = MakeStorageView();
- const SizeType<A> n = storage_view.size;
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ const SizeType<A> n = storage_view.size;
if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
// Fast path; new element fits.
- Pointer<A> last_ptr = storage_view.data + n;
- AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
- std::forward<Args>(args)...);
+ Pointer<A> last_ptr = storage_view.data + n;
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
AddSize(1);
return *last_ptr;
}
@@ -731,83 +731,83 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
template <typename T, size_t N, typename A>
template <typename... Args>
-auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
- StorageView<A> storage_view = MakeStorageView();
- AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(storage_view.data));
- SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
- Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
- Pointer<A> last_ptr = construct_data + storage_view.size;
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+ SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+ Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
+ Pointer<A> last_ptr = construct_data + storage_view.size;
// Construct new element.
- AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
- std::forward<Args>(args)...);
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
// Move elements from old backing store to new backing store.
ABSL_INTERNAL_TRY {
- ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
- storage_view.size);
+ ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
+ storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
- AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
+ AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
DeallocateIfAllocated();
- SetAllocation(std::move(allocation_tx).Release());
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
AddSize(1);
return *last_ptr;
}
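The EmplaceBack/EmplaceBackSlow split above keeps the hot path tiny and pushes all reallocation work out of line. A self-contained analogue (illustrative types only; a doubling step stands in for NextCapacity, whose exact policy is an implementation detail):

#include <cstddef>

// Minimal int-only analogue of the fast/slow split.
struct IntVec {
  int* data = nullptr;
  std::size_t size = 0, capacity = 0;
};

void push_back_slow(IntVec& v, int value) {
  std::size_t new_cap = v.capacity ? v.capacity * 2 : 1;  // NextCapacity stand-in
  int* new_data = new int[new_cap];
  for (std::size_t i = 0; i < v.size; ++i) new_data[i] = v.data[i];
  delete[] v.data;
  v.data = new_data;
  v.capacity = new_cap;
  v.data[v.size++] = value;
}

inline void push_back(IntVec& v, int value) {
  if (v.size != v.capacity) {   // fast path: the new element fits
    v.data[v.size++] = value;
  } else {
    push_back_slow(v, value);   // cold path reallocates first
  }
}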
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
- -> Iterator<A> {
- StorageView<A> storage_view = MakeStorageView();
+auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
+ -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
- SizeType<A> erase_size = std::distance(from, to);
- SizeType<A> erase_index =
- std::distance(ConstIterator<A>(storage_view.data), from);
- SizeType<A> erase_end_index = erase_index + erase_size;
+ SizeType<A> erase_size = std::distance(from, to);
+ SizeType<A> erase_index =
+ std::distance(ConstIterator<A>(storage_view.data), from);
+ SizeType<A> erase_end_index = erase_index + erase_size;
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(storage_view.data + erase_end_index));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data + erase_end_index));
- AssignElements<A>(storage_view.data + erase_index, move_values,
- storage_view.size - erase_end_index);
+ AssignElements<A>(storage_view.data + erase_index, move_values,
+ storage_view.size - erase_end_index);
- DestroyElements<A>(GetAllocator(),
- storage_view.data + (storage_view.size - erase_size),
- erase_size);
+ DestroyElements<A>(GetAllocator(),
+ storage_view.data + (storage_view.size - erase_size),
+ erase_size);
SubtractSize(erase_size);
- return Iterator<A>(storage_view.data + erase_index);
+ return Iterator<A>(storage_view.data + erase_index);
}
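Erase() above uses a move-tail-then-destroy shape: move-assign the elements after the erased range over the gap, then destroy the now-dead trailing erase_size slots. The same idea expressed against std::vector for illustration:

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative only; mirrors Erase()'s strategy, not its exact code.
template <typename T>
void erase_range(std::vector<T>& v, std::size_t from, std::size_t to) {
  std::move(v.begin() + static_cast<std::ptrdiff_t>(to), v.end(),
            v.begin() + static_cast<std::ptrdiff_t>(from));  // shift tail left
  v.resize(v.size() - (to - from));  // drops (destroys) the dead tail
}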
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
- StorageView<A> storage_view = MakeStorageView();
+auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
+ StorageView<A> storage_view = MakeStorageView();
if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
- AllocationTransaction<A> allocation_tx(GetAllocator());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- SizeType<A> new_requested_capacity =
+ SizeType<A> new_requested_capacity =
ComputeCapacity(storage_view.capacity, requested_capacity);
- Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
+ Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
- ConstructElements<A>(GetAllocator(), new_data, move_values,
- storage_view.size);
+ ConstructElements<A>(GetAllocator(), new_data, move_values,
+ storage_view.size);
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
DeallocateIfAllocated();
- SetAllocation(std::move(allocation_tx).Release());
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -816,44 +816,44 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
// May only be called on allocated instances!
assert(GetIsAllocated());
- StorageView<A> storage_view{GetAllocatedData(), GetSize(),
- GetAllocatedCapacity()};
+ StorageView<A> storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
- AllocationTransaction<A> allocation_tx(GetAllocator());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- Pointer<A> construct_data;
+ Pointer<A> construct_data;
if (storage_view.size > GetInlinedCapacity()) {
- SizeType<A> requested_capacity = storage_view.size;
- construct_data = allocation_tx.Allocate(requested_capacity);
- if (allocation_tx.GetCapacity() >= storage_view.capacity) {
- // Already using the smallest available heap allocation.
- return;
- }
+ SizeType<A> requested_capacity = storage_view.size;
+ construct_data = allocation_tx.Allocate(requested_capacity);
+ if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+ // Already using the smallest available heap allocation.
+ return;
+ }
} else {
construct_data = GetInlinedData();
}
ABSL_INTERNAL_TRY {
- ConstructElements<A>(GetAllocator(), construct_data, move_values,
- storage_view.size);
+ ConstructElements<A>(GetAllocator(), construct_data, move_values,
+ storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
- SetAllocation({storage_view.data, storage_view.capacity});
+ SetAllocation({storage_view.data, storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
- storage_view.capacity);
+ MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+ storage_view.capacity);
if (allocation_tx.DidAllocate()) {
- SetAllocation(std::move(allocation_tx).Release());
+ SetAllocation(std::move(allocation_tx).Release());
} else {
UnsetIsAllocated();
}
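In user-facing terms, this branch is what lets shrink_to_fit() move a shrunken InlinedVector back into its inline buffer. A usage sketch (element counts are arbitrary):

#include "absl/container/inlined_vector.h"

void shrink_example() {
  absl::InlinedVector<int, 8> v(100);  // 100 > 8, so storage is on the heap
  v.resize(4);                         // size shrinks, allocation stays
  v.shrink_to_fit();                   // 4 <= 8: moves back inline, frees heap
}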
@@ -871,56 +871,56 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
Storage* large_ptr = other_storage_ptr;
if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
- for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
+ for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
}
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
- ConstructElements<A>(large_ptr->GetAllocator(),
- small_ptr->GetInlinedData() + small_ptr->GetSize(),
- move_values,
- large_ptr->GetSize() - small_ptr->GetSize());
+ ConstructElements<A>(large_ptr->GetAllocator(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(),
+ move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
- DestroyElements<A>(large_ptr->GetAllocator(),
- large_ptr->GetInlinedData() + small_ptr->GetSize(),
- large_ptr->GetSize() - small_ptr->GetSize());
+ DestroyElements<A>(large_ptr->GetAllocator(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
- StorageView<A> allocated_storage_view{
- allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
- allocated_ptr->GetAllocatedCapacity()};
+ StorageView<A> allocated_storage_view{
+ allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(inlined_ptr->GetInlinedData()));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(inlined_ptr->GetInlinedData()));
ABSL_INTERNAL_TRY {
- ConstructElements<A>(inlined_ptr->GetAllocator(),
- allocated_ptr->GetInlinedData(), move_values,
- inlined_ptr->GetSize());
+ ConstructElements<A>(inlined_ptr->GetAllocator(),
+ allocated_ptr->GetInlinedData(), move_values,
+ inlined_ptr->GetSize());
}
ABSL_INTERNAL_CATCH_ANY {
- allocated_ptr->SetAllocation(
- {allocated_storage_view.data, allocated_storage_view.capacity});
+ allocated_ptr->SetAllocation(
+ {allocated_storage_view.data, allocated_storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- DestroyElements<A>(inlined_ptr->GetAllocator(),
- inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
+ DestroyElements<A>(inlined_ptr->GetAllocator(),
+ inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
- inlined_ptr->SetAllocation(
- {allocated_storage_view.data, allocated_storage_view.capacity});
+ inlined_ptr->SetAllocation(
+ {allocated_storage_view.data, allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
- swap(GetAllocator(), other_storage_ptr->GetAllocator());
+ swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
-// End ignore "array-bounds" and "maybe-uninitialized"
+// End ignore "array-bounds" and "maybe-uninitialized"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
index c7df2efc62..3722cddb64 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -51,9 +51,9 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
using key_arg = typename KeyArgImpl::template type<K, key_type>;
static_assert(!std::is_reference<key_type>::value, "");
-
- // TODO(b/187807849): Evaluate whether to support reference mapped_type and
- // remove this assertion if/when it is supported.
+
+ // TODO(b/187807849): Evaluate whether to support reference mapped_type and
+ // remove this assertion if/when it is supported.
static_assert(!std::is_reference<mapped_type>::value, "");
using iterator = typename raw_hash_map::raw_hash_set::iterator;
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 687bcb8a4d..8173b7fcdb 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -23,12 +23,12 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
-alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
- ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
-
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
+ ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+
constexpr size_t Group::kWidth;
// Returns "random" seed.
@@ -43,24 +43,24 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
-void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
- assert(ctrl[capacity] == ctrl_t::kSentinel);
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
+ assert(ctrl[capacity] == ctrl_t::kSentinel);
assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
+ for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
- ctrl[capacity] = ctrl_t::kSentinel;
+ std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
}
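A worked example of the mapping this function applies, where FULL means any non-negative H2 byte (values here are hypothetical):

// before: [ 0x3a,     kDeleted, kEmpty, 0x11,     ..., kSentinel, <clones> ]
// after:  [ kDeleted, kEmpty,   kEmpty, kDeleted, ..., kSentinel, <clones> ]
// i.e. DELETED -> EMPTY, EMPTY -> EMPTY, FULL -> DELETED; the sentinel is
// restored afterwards and the cloned bytes are refreshed from the start.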
-// Extern template instantiation for inline function.
-template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+// Extern template instantiation for inline function.
+template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
index 12682b3532..fdda6ad9bf 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -87,17 +87,17 @@
//
// This probing function guarantees that after N probes, all the groups of the
// table will be probed exactly once.
-//
-// The control state and slot array are stored contiguously in a shared heap
-// allocation. The layout of this allocation is: `capacity()` control bytes,
-// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
-// <possible padding>, `capacity()` slots. The sentinel control byte is used in
-// iteration so we know when we reach the end of the table. The cloned control
-// bytes at the end of the table are cloned from the beginning of the table so
-// groups that begin near the end of the table can see a full group. In cases in
-// which there are more than `capacity()` cloned control bytes, the extra bytes
-// are `kEmpty`, and these ensure that we always see at least one empty slot and
-// can stop an unsuccessful search.
+//
+// The control state and slot array are stored contiguously in a shared heap
+// allocation. The layout of this allocation is: `capacity()` control bytes,
+// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
+// <possible padding>, `capacity()` slots. The sentinel control byte is used in
+// iteration so we know when we reach the end of the table. The cloned control
+// bytes at the end of the table are cloned from the beginning of the table so
+// groups that begin near the end of the table can see a full group. In cases in
+// which there are more than `capacity()` cloned control bytes, the extra bytes
+// are `kEmpty`, and these ensure that we always see at least one empty slot and
+// can stop an unsuccessful search.
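+// A concrete (hypothetical) instance of that layout, for capacity() == 15
+// and Group::kWidth == 16:
+//   [15 ctrl bytes][1 sentinel][15 cloned ctrl bytes][padding][15 slots]
+// i.e. 31 control bytes in total, then the slot array aligned to
+// alignof(slot_type).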
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
@@ -265,50 +265,50 @@ class BitMask {
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
-// below for details. We use an enum class so that when strict aliasing is
-// enabled, the compiler knows ctrl_t doesn't alias other types.
-enum class ctrl_t : int8_t {
+// below for details. We use an enum class so that when strict aliasing is
+// enabled, the compiler knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
kSentinel = -1, // 0b11111111
};
static_assert(
- (static_cast<int8_t>(ctrl_t::kEmpty) &
- static_cast<int8_t>(ctrl_t::kDeleted) &
- static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
+ (static_cast<int8_t>(ctrl_t::kEmpty) &
+ static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
"Special markers need to have the MSB to make checking for them efficient");
-static_assert(
- ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
- "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
- "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(
- ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
- "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
- "registers (pcmpeqd xmm, xmm)");
-static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
- "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
+static_assert(
+ ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+ "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+ ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+ "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+ "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
"existence efficient (psignb xmm, xmm)");
-static_assert(
- (~static_cast<int8_t>(ctrl_t::kEmpty) &
- ~static_cast<int8_t>(ctrl_t::kDeleted) &
- static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
- "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
- "shared by ctrl_t::kSentinel to make the scalar test for "
- "MatchEmptyOrDeleted() efficient");
-static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
- "ctrl_t::kDeleted must be -2 to make the implementation of "
+static_assert(
+ (~static_cast<int8_t>(ctrl_t::kEmpty) &
+ ~static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+ "shared by ctrl_t::kSentinel to make the scalar test for "
+ "MatchEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+ "ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
// A single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find().
-ABSL_DLL extern const ctrl_t kEmptyGroup[16];
+ABSL_DLL extern const ctrl_t kEmptyGroup[16];
inline ctrl_t* EmptyGroup() {
- return const_cast<ctrl_t*>(kEmptyGroup);
+ return const_cast<ctrl_t*>(kEmptyGroup);
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
// Returns a hash seed.
//
@@ -324,12 +324,12 @@ inline size_t HashSeed(const ctrl_t* ctrl) {
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
return (hash >> 7) ^ HashSeed(ctrl);
}
-inline h2_t H2(size_t hash) { return hash & 0x7F; }
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
-inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
-inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
-inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
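Stripped of the per-table seed mixing, the split these helpers implement looks like this (a sketch, not the exact production code):

#include <cstddef>
#include <cstdint>

// H1: the upper bits pick the probe sequence; H2: the low 7 bits become the
// per-slot control byte that groups compare many-at-a-time.
inline std::size_t h1_unseeded(std::size_t hash) { return hash >> 7; }
inline std::uint8_t h2(std::size_t hash) {
  return static_cast<std::uint8_t>(hash & 0x7F);
}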
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
@@ -366,24 +366,24 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
- // This only works because ctrl_t::kEmpty is -128.
+ // This only works because ctrl_t::kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
- return Match(static_cast<h2_t>(ctrl_t::kEmpty));
+ return Match(static_cast<h2_t>(ctrl_t::kEmpty));
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
return TrailingZeros(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
@@ -418,7 +418,7 @@ struct GroupPortableImpl {
//
// Caveat: there are false positives but:
// - they only occur if there is a real match
- // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
+ // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
// - they will be handled gracefully by subsequent checks in code
//
// Example:
@@ -463,10 +463,10 @@ using Group = GroupSse2Impl;
using Group = GroupPortableImpl;
#endif
-// The number of cloned control bytes that we copy from the beginning to the
-// end of the control bytes array.
-constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
-
+// The number of cloned control bytes that we copy from the beginning to the
+// end of the control bytes array.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
@@ -474,8 +474,8 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// PRECONDITION:
// IsValidCapacity(capacity)
-// ctrl[capacity] == ctrl_t::kSentinel
-// ctrl[i] != ctrl_t::kSentinel for all i < capacity
+// ctrl[capacity] == ctrl_t::kSentinel
+// ctrl[i] != ctrl_t::kSentinel for all i < capacity
// Applies mapping for every byte in ctrl:
// DELETED -> EMPTY
// EMPTY -> EMPTY
@@ -517,22 +517,22 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
-template <class InputIter>
-size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
- size_t bucket_count) {
- if (bucket_count != 0) {
- return bucket_count;
- }
- using InputIterCategory =
- typename std::iterator_traits<InputIter>::iterator_category;
- if (std::is_base_of<std::random_access_iterator_tag,
- InputIterCategory>::value) {
- return GrowthToLowerboundCapacity(
- static_cast<size_t>(std::distance(first, last)));
- }
- return 0;
-}
-
+template <class InputIter>
+size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
+ size_t bucket_count) {
+ if (bucket_count != 0) {
+ return bucket_count;
+ }
+ using InputIterCategory =
+ typename std::iterator_traits<InputIter>::iterator_category;
+ if (std::is_base_of<std::random_access_iterator_tag,
+ InputIterCategory>::value) {
+ return GrowthToLowerboundCapacity(
+ static_cast<size_t>(std::distance(first, last)));
+ }
+ return 0;
+}
+
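At the call site, the effect is that range construction from a random-access range can pre-size the table, while forward-only input iterators leave bucket_count at 0 and the table grows as it inserts. A usage sketch (assuming the public flat_hash_set wrapper, which forwards to this path):

#include <vector>
#include "absl/container/flat_hash_set.h"

void presize_example() {
  std::vector<int> values = {1, 2, 3, 4, 5};
  // bucket_count defaults to 0, so the constructor derives a lower-bound
  // capacity from std::distance(first, last) and avoids growth rehashes.
  absl::flat_hash_set<int> s(values.begin(), values.end());
}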
inline void AssertIsFull(ctrl_t* ctrl) {
ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
"Invalid operation on iterator. The element might have "
@@ -560,29 +560,29 @@ struct FindInfo {
// This is important to make 1 a valid capacity.
//
// - In small mode only the first `capacity()` control bytes after the
-// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
// represent a real slot. This is important to take into account in
// find_first_non_full(), where we never try ShouldInsertBackwards() for
// small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
// Probes the raw_hash_set with the probe sequence for hash and returns the
// pointer to the first empty or deleted slot.
-// NOTE: this function must work with tables having both ctrl_t::kEmpty and
-// ctrl_t::kDeleted in one group. Such tables appear during
-// drop_deletes_without_resize.
+// NOTE: this function must work with tables having both ctrl_t::kEmpty and
+// ctrl_t::kDeleted in one group. Such tables appear during
+// drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
-template <typename = void>
-inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
+template <typename = void>
+inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
auto seq = probe(ctrl, hash, capacity);
while (true) {
@@ -601,60 +601,60 @@ inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() <= capacity && "full table!");
- }
-}
-
-// An extern template for an inline function keeps the possibility of
-// inlining. When the compiler decides not to inline it, no symbols will be
-// added to the corresponding translation unit.
-extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-
-// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
-inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
- size_t slot_size) {
- std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
- capacity + 1 + NumClonedBytes());
- ctrl[capacity] = ctrl_t::kSentinel;
- SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
-}
-
-// Sets the control byte, and if `i < NumClonedBytes()`, sets the cloned byte
-// at the end too.
-inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
- const void* slot, size_t slot_size) {
- assert(i < capacity);
-
- auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
- if (IsFull(h)) {
- SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
- } else {
- SanitizerPoisonMemoryRegion(slot_i, slot_size);
+ assert(seq.index() <= capacity && "full table!");
}
-
- ctrl[i] = h;
- ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
-}
-
-inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
- const void* slot, size_t slot_size) {
- SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
-}
-
-// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
-// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
-// SlotOffset returns the offset of the slots into the allocated block.
-inline size_t SlotOffset(size_t capacity, size_t slot_align) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
-}
-
-// Returns the size of the allocated block. See also above comment.
-inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
- return SlotOffset(capacity, slot_align) + capacity * slot_size;
}
+// An extern template for an inline function keeps the possibility of
+// inlining. When the compiler decides not to inline it, no symbols will be
+// added to the corresponding translation unit.
+extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+
+// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
+inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
+ size_t slot_size) {
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ capacity + 1 + NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+ SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
+}
+
+// Sets the control byte, and if `i < NumClonedBytes()`, sets the cloned byte
+// at the end too.
+inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ assert(i < capacity);
+
+ auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
+ if (IsFull(h)) {
+ SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
+ } else {
+ SanitizerPoisonMemoryRegion(slot_i, slot_size);
+ }
+
+ ctrl[i] = h;
+ ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
+}
+
+inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
+}
+
+// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
+// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
+// SlotOffset returns the offset of the slots into the allocated block.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
+}
+
+// Returns the size of the allocated block. See also above comment.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
+
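+// Worked numbers for the two helpers above (hypothetical parameters:
+// capacity = 15, Group::kWidth = 16, slot_size = 8, slot_align = 8):
+//   num_control_bytes  = 15 + 1 + 15 = 31
+//   SlotOffset(15, 8)  = (31 + 8 - 1) & ~size_t{7} = 32   // 31 rounded up to 8
+//   AllocSize(15, 8, 8) = 32 + 15 * 8 = 152 bytes total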
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -813,7 +813,7 @@ class raw_hash_set {
ctrl_ += shift;
slot_ += shift;
}
- if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
+ if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
ctrl_t* ctrl_ = nullptr;
@@ -894,8 +894,8 @@ class raw_hash_set {
raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
- hash, eq, alloc) {
+ : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
+ hash, eq, alloc) {
insert(first, last);
}
@@ -983,8 +983,8 @@ class raw_hash_set {
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
auto target = find_first_non_full(ctrl_, hash, capacity_);
- SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
- sizeof(slot_type));
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
emplace_at(target.offset, v);
infoz().RecordInsert(hash, target.probe_length);
}
@@ -1080,8 +1080,8 @@ class raw_hash_set {
// past that we simply deallocate the array.
if (capacity_ > 127) {
destroy_slots();
-
- infoz().RecordClearedReservation();
+
+ infoz().RecordClearedReservation();
} else if (capacity_) {
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
@@ -1089,7 +1089,7 @@ class raw_hash_set {
}
}
size_ = 0;
- ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
}
assert(empty());
@@ -1395,31 +1395,31 @@ class raw_hash_set {
if (n == 0 && size_ == 0) {
destroy_slots();
infoz().RecordStorageChanged(0, 0);
- infoz().RecordClearedReservation();
+ infoz().RecordClearedReservation();
return;
}
-
+
// bitor is a faster way of doing `max` here. We will round up to the next
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
-
- // This is after resize, to ensure that we have completed the allocation
- // and have potentially sampled the hashtable.
- infoz().RecordReservation(n);
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
}
}
void reserve(size_t n) {
- if (n > size() + growth_left()) {
- size_t m = GrowthToLowerboundCapacity(n);
+ if (n > size() + growth_left()) {
+ size_t m = GrowthToLowerboundCapacity(n);
resize(NormalizeCapacity(m));
-
- // This is after resize, to ensure that we have completed the allocation
- // and have potentially sampled the hashtable.
- infoz().RecordReservation(n);
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
}
}
@@ -1446,7 +1446,7 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const {
(void)key;
#if defined(__GNUC__)
- prefetch_heap_block();
+ prefetch_heap_block();
auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
@@ -1473,12 +1473,12 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
- assert(seq.index() <= capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
}
template <class K = key_type>
iterator find(const key_arg<K>& key) {
- prefetch_heap_block();
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -1488,7 +1488,7 @@ class raw_hash_set {
}
template <class K = key_type>
const_iterator find(const key_arg<K>& key) const {
- prefetch_heap_block();
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -1623,8 +1623,8 @@ class raw_hash_set {
static_cast<size_t>(empty_after.TrailingZeros() +
empty_before.LeadingZeros()) < Group::kWidth;
- SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
- capacity_, ctrl_, slots_, sizeof(slot_type));
+ SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
+ capacity_, ctrl_, slots_, sizeof(slot_type));
growth_left() += was_never_full;
infoz().RecordErase();
}
@@ -1643,16 +1643,16 @@ class raw_hash_set {
// bound more carefully.
if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
slots_ == nullptr) {
- infoz() = Sample(sizeof(slot_type));
+ infoz() = Sample(sizeof(slot_type));
}
- char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
- &alloc_ref(),
- AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
- ctrl_ = reinterpret_cast<ctrl_t*>(mem);
- slots_ = reinterpret_cast<slot_type*>(
- mem + SlotOffset(capacity_, alignof(slot_type)));
- ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
+ char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
+ &alloc_ref(),
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
+ ctrl_ = reinterpret_cast<ctrl_t*>(mem);
+ slots_ = reinterpret_cast<slot_type*>(
+ mem + SlotOffset(capacity_, alignof(slot_type)));
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
infoz().RecordStorageChanged(size_, capacity_);
}
@@ -1664,12 +1664,12 @@ class raw_hash_set {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
-
+
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), ctrl_,
- AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), ctrl_,
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
ctrl_ = EmptyGroup();
slots_ = nullptr;
size_ = 0;
@@ -1693,16 +1693,16 @@ class raw_hash_set {
auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
}
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), old_ctrl,
- AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), old_ctrl,
+ AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
infoz().RecordRehash(total_probe_length);
}
@@ -1732,35 +1732,35 @@ class raw_hash_set {
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
- const size_t hash = PolicyTraits::apply(
- HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
- const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
- const size_t new_i = target.offset;
+ const size_t hash = PolicyTraits::apply(
+ HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
+ const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
+ const size_t new_i = target.offset;
total_probe_length += target.probe_length;
// Check whether the old and new i fall within the same group with respect
// to the hash. If they do, we don't need to move the object, as it already
// sits in the best probe position we can achieve.
- const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
- const auto probe_index = [probe_offset, this](size_t pos) {
- return ((pos - probe_offset) & capacity_) / Group::kWidth;
+ const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
+ const auto probe_index = [probe_offset, this](size_t pos) {
+ return ((pos - probe_offset) & capacity_) / Group::kWidth;
};
// Element doesn't move.
if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
- SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
continue;
}
if (IsEmpty(ctrl_[new_i])) {
// Transfer element to the empty spot.
- // SetCtrl poisons/unpoisons the slots so we have to call it at the
+ // SetCtrl poisons/unpoisons the slots so we have to call it at the
// right time.
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
- SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
+ SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
} else {
assert(IsDeleted(ctrl_[new_i]));
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
// Until we are done rehashing, DELETED marks previously FULL slots.
// Swap i and new_i elements.
PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
@@ -1776,50 +1776,50 @@ class raw_hash_set {
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(1);
- } else if (capacity_ > Group::kWidth &&
-               // Do these calculations in 64-bit to avoid overflow.
- size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
+ } else if (capacity_ > Group::kWidth &&
+               // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
- //
- // Rehash in place if the current size is <= 25/32 of capacity_.
- // Rationale for such a high factor: 1) drop_deletes_without_resize() is
- // faster than resize, and 2) it takes quite a bit of work to add
- // tombstones. In the worst case, seems to take approximately 4
- // insert/erase pairs to create a single tombstone and so if we are
- // rehashing because of tombstones, we can afford to rehash-in-place as
- // long as we are reclaiming at least 1/8 the capacity without doing more
- // than 2X the work. (Where "work" is defined to be size() for rehashing
- // or rehashing in place, and 1 for an insert or erase.) But rehashing in
- // place is faster per operation than inserting or even doubling the size
- // of the table, so we actually afford to reclaim even less space from a
- // resize-in-place. The decision is to rehash in place if we can reclaim
- // at about 1/8th of the usable capacity (specifically 3/28 of the
- // capacity) which means that the total cost of rehashing will be a small
- // fraction of the total work.
- //
- // Here is output of an experiment using the BM_CacheInSteadyState
- // benchmark running the old case (where we rehash-in-place only if we can
- // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
- // if we can recover 3/32*capacity_).
- //
-      // Note that although the worst-case number of rehashes jumped up from
-      // 15 to 190, the number of operations per second is almost the same.
- //
- // Abridged output of running BM_CacheInSteadyState benchmark from
- // raw_hash_set_benchmark. N is the number of insert/erase operations.
- //
-      //       | OLD (recover >= 7/16) | NEW (recover >= 3/32)
- // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
- // 448 | 145284 0.44 18 | 140118 0.44 19
- // 493 | 152546 0.24 11 | 151417 0.48 28
- // 538 | 151439 0.26 11 | 151152 0.53 38
- // 583 | 151765 0.28 11 | 150572 0.57 50
- // 628 | 150241 0.31 11 | 150853 0.61 66
- // 672 | 149602 0.33 12 | 150110 0.66 90
- // 717 | 149998 0.35 12 | 149531 0.70 129
- // 762 | 149836 0.37 13 | 148559 0.74 190
- // 807 | 149736 0.39 14 | 151107 0.39 14
- // 852 | 150204 0.42 15 | 151019 0.42 15
+ //
+ // Rehash in place if the current size is <= 25/32 of capacity_.
+ // Rationale for such a high factor: 1) drop_deletes_without_resize() is
+ // faster than resize, and 2) it takes quite a bit of work to add
+ // tombstones. In the worst case, seems to take approximately 4
+ // insert/erase pairs to create a single tombstone and so if we are
+ // rehashing because of tombstones, we can afford to rehash-in-place as
+ // long as we are reclaiming at least 1/8 the capacity without doing more
+ // than 2X the work. (Where "work" is defined to be size() for rehashing
+ // or rehashing in place, and 1 for an insert or erase.) But rehashing in
+ // place is faster per operation than inserting or even doubling the size
+ // of the table, so we actually afford to reclaim even less space from a
+ // resize-in-place. The decision is to rehash in place if we can reclaim
+ // at about 1/8th of the usable capacity (specifically 3/28 of the
+ // capacity) which means that the total cost of rehashing will be a small
+ // fraction of the total work.
+ //
+ // Here is output of an experiment using the BM_CacheInSteadyState
+ // benchmark running the old case (where we rehash-in-place only if we can
+ // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity_).
+ //
+      // Note that although the worst-case number of rehashes jumped up from
+      // 15 to 190, the number of operations per second is almost the same.
+ //
+ // Abridged output of running BM_CacheInSteadyState benchmark from
+ // raw_hash_set_benchmark. N is the number of insert/erase operations.
+ //
+      //       | OLD (recover >= 7/16) | NEW (recover >= 3/32)
+ // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
+ // 448 | 145284 0.44 18 | 140118 0.44 19
+ // 493 | 152546 0.24 11 | 151417 0.48 28
+ // 538 | 151439 0.26 11 | 151152 0.53 38
+ // 583 | 151765 0.28 11 | 150572 0.57 50
+ // 628 | 150241 0.31 11 | 150853 0.61 66
+ // 672 | 149602 0.33 12 | 150110 0.66 90
+ // 717 | 149998 0.35 12 | 149531 0.70 129
+ // 762 | 149836 0.37 13 | 148559 0.74 190
+ // 807 | 149736 0.39 14 | 151107 0.39 14
+ // 852 | 150204 0.42 15 | 151019 0.42 15
drop_deletes_without_resize();
} else {
// Otherwise grow the container.
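The 25/32 guard in the condition above, in concrete numbers:

// With capacity_ = 1023, rehash-in-place is taken while
//   size() * 32 <= 1023 * 25  =>  size() <= 799,
// i.e. while the table is at most ~25/32 (~78%) full; beyond that,
// resizing is the cheaper way to get rid of tombstones.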
@@ -1839,7 +1839,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
seq.next();
- assert(seq.index() <= capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return false;
}
@@ -1859,7 +1859,7 @@ class raw_hash_set {
protected:
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
- prefetch_heap_block();
+ prefetch_heap_block();
auto hash = hash_ref()(key);
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
@@ -1872,7 +1872,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
- assert(seq.index() <= capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
@@ -1886,8 +1886,8 @@ class raw_hash_set {
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
- SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
- sizeof(slot_type));
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
@@ -1920,15 +1920,15 @@ class raw_hash_set {
growth_left() = CapacityToGrowth(capacity()) - size_;
}
- size_t& growth_left() { return settings_.template get<0>(); }
+ size_t& growth_left() { return settings_.template get<0>(); }
- void prefetch_heap_block() const {
- // Prefetch the heap-allocated memory region to resolve potential TLB
-    // misses. This is intended to overlap with the computation of the key's
-    // hash.
-#if defined(__GNUC__)
- __builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
-#endif // __GNUC__
+ void prefetch_heap_block() const {
+ // Prefetch the heap-allocated memory region to resolve potential TLB
+    // misses. This is intended to overlap with the computation of the key's
+    // hash.
+#if defined(__GNUC__)
+ __builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
+#endif // __GNUC__
}
HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
@@ -1945,10 +1945,10 @@ class raw_hash_set {
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
+ ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
+ slot_type* slots_ = nullptr; // [capacity * slot_type]
+ size_t size_ = 0; // number of full slots
+ size_t capacity_ = 0; // total number of slots
absl::container_internal::CompressedTuple<size_t /* growth_left */,
HashtablezInfoHandle, hasher,
key_equal, allocator_type>
@@ -1958,12 +1958,12 @@ class raw_hash_set {
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
for (auto it = c->begin(), last = c->end(); it != last;) {
- if (pred(*it)) {
- c->erase(it++);
- } else {
- ++it;
+ if (pred(*it)) {
+ c->erase(it++);
+ } else {
+ ++it;
}
}
}
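A usage sketch through the public wrapper (absl::erase_if forwards to this helper; its return type varies across Abseil versions, so it is ignored here):

#include "absl/container/flat_hash_set.h"

void erase_if_example() {
  absl::flat_hash_set<int> s = {1, 2, 3, 4, 5, 6};
  absl::erase_if(s, [](int v) { return v % 2 == 0; });  // keeps 1, 3, 5
}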
@@ -1998,7 +1998,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity_;
if (capacity == 0) return 0;
- size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
+ size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -2016,8 +2016,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t LowerBoundAllocatedByteSize(size_t size) {
size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- size_t m =
- AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
+ size_t m =
+ AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * size;
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
index 3fe7e7b5c0..19ff37aae3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set/ya.make
@@ -10,26 +10,26 @@ LICENSE(Apache-2.0)
PEERDIR(
contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
contrib/restricted/abseil-cpp/absl/base/log_severity
- contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
- contrib/restricted/abseil-cpp/absl/debugging
- contrib/restricted/abseil-cpp/absl/debugging/stacktrace
- contrib/restricted/abseil-cpp/absl/debugging/symbolize
- contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/hash
+ contrib/restricted/abseil-cpp/absl/container/internal/absl_hashtablez_sampler
+ contrib/restricted/abseil-cpp/absl/debugging
+ contrib/restricted/abseil-cpp/absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp/absl/debugging/symbolize
+ contrib/restricted/abseil-cpp/absl/demangle
+ contrib/restricted/abseil-cpp/absl/hash
contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
+ contrib/restricted/abseil-cpp/absl/profiling/internal/exponential_biased
contrib/restricted/abseil-cpp/absl/strings
- contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
- contrib/restricted/abseil-cpp/absl/synchronization
- contrib/restricted/abseil-cpp/absl/synchronization/internal
- contrib/restricted/abseil-cpp/absl/time
- contrib/restricted/abseil-cpp/absl/time/civil_time
- contrib/restricted/abseil-cpp/absl/time/time_zone
+ contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
+ contrib/restricted/abseil-cpp/absl/synchronization
+ contrib/restricted/abseil-cpp/absl/synchronization/internal
+ contrib/restricted/abseil-cpp/absl/time
+ contrib/restricted/abseil-cpp/absl/time/civil_time
+ contrib/restricted/abseil-cpp/absl/time/time_zone
contrib/restricted/abseil-cpp/absl/types
contrib/restricted/abseil-cpp/absl/types/bad_optional_access
)
@@ -46,10 +46,10 @@ CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
+SRCDIR(contrib/restricted/abseil-cpp/absl/container/internal)
SRCS(
- raw_hash_set.cc
+ raw_hash_set.cc
)
END()
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
index c1d20f3c52..639567de48 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -179,7 +179,7 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::UniqueGenerator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
@@ -198,7 +198,7 @@ void InputIteratorBucketAllocTest(std::true_type) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::UniqueGenerator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
@@ -221,7 +221,7 @@ void InputIteratorBucketHashAllocTest(std::true_type) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::UniqueGenerator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
@@ -241,9 +241,9 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
H hasher;
E equal;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -263,9 +263,9 @@ void CopyConstructorAllocTest(std::true_type) {
H hasher;
E equal;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -287,9 +287,9 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
H hasher;
E equal;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -310,9 +310,9 @@ void MoveConstructorAllocTest(std::true_type) {
H hasher;
E equal;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(gen());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -329,7 +329,7 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -352,7 +352,7 @@ template <typename TypeParam>
void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
@@ -375,7 +375,7 @@ void InitializerListBucketHashAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
@@ -396,7 +396,7 @@ TYPED_TEST_P(ConstructorTest, Assignment) {
H hasher;
E equal;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
@@ -416,7 +416,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) {
H hasher;
E equal;
A alloc(0);
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
@@ -428,7 +428,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) {
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
@@ -437,7 +437,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
@@ -446,7 +446,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
@@ -456,7 +456,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
@@ -465,7 +465,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::UniqueGenerator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign
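
Every constructor test above draws its values from hash_internal::UniqueGenerator<T>, which hands back a fresh value on each call so inserts never collide on keys. A self-contained sketch of that pattern, assuming nothing from the real hash_generator_testing.h beyond the retry-until-unseen idea (all names below are illustrative):

#include <cassert>
#include <set>

template <class T, class Gen>
class UniqueGeneratorSketch {
 public:
  T operator()() {
    // Redraw until we produce a value not handed out before.
    while (true) {
      T value = gen_();
      if (seen_.insert(value).second) return value;
    }
  }

 private:
  Gen gen_;
  std::set<T> seen_;
};

struct Repeats {  // yields 0, 0, 1, 1, 2, 2, ... to exercise deduplication
  int n = 0;
  int operator()() { return n++ / 2; }
};

int main() {
  UniqueGeneratorSketch<int, Repeats> gen;
  assert(gen() == 0);
  assert(gen() == 1);  // the duplicated 0 was skipped
  assert(gen() == 2);
  return 0;
}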
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
index d3543936f7..b7676bbc9a 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
@@ -81,38 +81,38 @@ TYPED_TEST_P(ModifiersTest, InsertRange) {
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
-TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
- using T = hash_internal::GeneratedType<TypeParam>;
- using V = typename TypeParam::mapped_type;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- m.reserve(10);
- const size_t original_capacity = m.bucket_count();
- m.insert(val);
- EXPECT_EQ(m.bucket_count(), original_capacity);
- T val2 = {val.first, hash_internal::Generator<V>()()};
- m.insert(val2);
- EXPECT_EQ(m.bucket_count(), original_capacity);
-}
-
-TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
-#if !defined(__GLIBCXX__)
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> base_values;
- std::generate_n(std::back_inserter(base_values), 10,
- hash_internal::Generator<T>());
- std::vector<T> values;
- while (values.size() != 100) {
- std::copy_n(base_values.begin(), 10, std::back_inserter(values));
- }
- TypeParam m;
- m.reserve(10);
- const size_t original_capacity = m.bucket_count();
- m.insert(values.begin(), values.end());
- EXPECT_EQ(m.bucket_count(), original_capacity);
-#endif
-}
-
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ m.insert(val2);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ std::copy_n(base_values.begin(), 10, std::back_inserter(values));
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
#ifdef UNORDERED_MAP_CXX17
using std::get;
@@ -298,10 +298,10 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for merge.
REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, InsertWithinCapacity,
- InsertRangeWithinCapacity, InsertOrAssign,
- InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace,
- TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, InsertOrAssign,
+ InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace,
+ TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);
template <typename Type>
struct is_unique_ptr : std::false_type {};
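
The InsertWithinCapacity tests re-enabled above encode one guarantee: after reserve(n), up to n insertions (duplicate keys included) must not rehash, which the tests observe as an unchanged bucket_count(). The same check as a standalone sketch against absl::flat_hash_map (container and values are illustrative):

#include <cassert>
#include <string>

#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<int, std::string> m;
  m.reserve(10);
  const auto original_capacity = m.bucket_count();
  for (int i = 0; i < 10; ++i) m.insert({i, "v"});
  // All ten inserts fit within the reserved capacity: no rehash occurred.
  assert(m.bucket_count() == original_capacity);
  return 0;
}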
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
index 6e473e45da..68b0c77eff 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
@@ -74,36 +74,36 @@ TYPED_TEST_P(ModifiersTest, InsertRange) {
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
-TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
- using T = hash_internal::GeneratedType<TypeParam>;
- T val = hash_internal::Generator<T>()();
- TypeParam m;
- m.reserve(10);
- const size_t original_capacity = m.bucket_count();
- m.insert(val);
- EXPECT_EQ(m.bucket_count(), original_capacity);
- m.insert(val);
- EXPECT_EQ(m.bucket_count(), original_capacity);
-}
-
-TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
-#if !defined(__GLIBCXX__)
- using T = hash_internal::GeneratedType<TypeParam>;
- std::vector<T> base_values;
- std::generate_n(std::back_inserter(base_values), 10,
- hash_internal::Generator<T>());
- std::vector<T> values;
- while (values.size() != 100) {
- values.insert(values.end(), base_values.begin(), base_values.end());
- }
- TypeParam m;
- m.reserve(10);
- const size_t original_capacity = m.bucket_count();
- m.insert(values.begin(), values.end());
- EXPECT_EQ(m.bucket_count(), original_capacity);
-#endif
-}
-
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ values.insert(values.end(), base_values.begin(), base_values.end());
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
@@ -210,9 +210,9 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for merge.
REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, InsertWithinCapacity,
- InsertRangeWithinCapacity, Emplace, EmplaceHint,
- Erase, EraseRange, EraseKey, Swap);
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, Emplace, EmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
ABSL_NAMESPACE_END
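
REGISTER_TYPED_TEST_CASE_P fixes the test list for this parameterized suite; each concrete container then instantiates it in its own test binary. A sketch of that consuming side, assuming googletest's matching INSTANTIATE_TYPED_TEST_CASE_P spelling of this era (the instantiation label and type list are illustrative, not taken from this diff):

#include "gtest/gtest.h"

#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"

namespace absl {
namespace container_internal {

using SetTypes = ::testing::Types<absl::flat_hash_set<int>>;
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ModifiersTest, SetTypes);

}  // namespace container_internal
}  // namespace absl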