author    thegeorg <thegeorg@yandex-team.com>  2024-08-04 15:38:59 +0300
committer thegeorg <thegeorg@yandex-team.com>  2024-08-04 15:48:34 +0300
commit    df5a8e17b7f64a8c7086988670187d172bf9392a (patch)
tree      5a3d3d96389e94473f2465104afaae2abca603bc
parent    e30f7061bad712a7934e383fef27613fc6a1b2d7 (diff)
download  ydb-df5a8e17b7f64a8c7086988670187d172bf9392a.tar.gz
Update contrib/restricted/abseil-cpp to 20240722.0
d546de87f4113d314e722b1646c13db9f6aa5026
-rw-r--r--  contrib/restricted/abseil-cpp/absl/algorithm/container.h | 75
-rw-r--r--  contrib/restricted/abseil-cpp/absl/algorithm/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/attributes.h | 84
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/config.h | 85
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/poison.cc | 84
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/poison.h | 59
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc | 12
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/macros.h | 48
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/no_destructor.h | 75
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/nullability.h | 40
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/optimization.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/options.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/prefetch.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/ya.make | 1
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h | 80
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h | 66
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/hash_container_defaults.h | 45
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/inlined_vector.h | 13
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h | 31
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h | 31
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h | 35
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h | 69
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h | 50
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc | 29
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h | 34
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h | 74
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h | 14
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc | 405
-rw-r--r--  contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h | 1757
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h | 36
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/crc_cord_state.cc | 7
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_x86_arm_combined.cc | 20
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc | 18
-rw-r--r--  contrib/restricted/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h | 35
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/bounded_utf8_length_sequence.h | 126
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc | 258
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h | 55
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc | 1143
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc | 925
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.h | 42
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.cc | 59
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.h | 15
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc | 17
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.cc | 70
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.h | 47
-rw-r--r--  contrib/restricted/abseil-cpp/absl/debugging/ya.make | 3
-rw-r--r--  contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp/absl/flags/flag.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc | 147
-rw-r--r--  contrib/restricted/abseil-cpp/absl/flags/internal/flag.h | 222
-rw-r--r--  contrib/restricted/abseil-cpp/absl/flags/reflection.cc | 10
-rw-r--r--  contrib/restricted/abseil-cpp/absl/functional/any_invocable.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h | 14
-rw-r--r--  contrib/restricted/abseil-cpp/absl/functional/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/hash/internal/hash.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc | 88
-rw-r--r--  contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/absl_vlog_is_on.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/die_if_null.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/globals.h | 43
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc | 20
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/check_op.h | 84
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/conditions.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/log_impl.h | 46
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc | 151
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/log_message.h | 27
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/internal/strip.h | 36
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/log.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/log_sink.h | 15
-rw-r--r--  contrib/restricted/abseil-cpp/absl/log/vlog_is_on.h | 72
-rw-r--r--  contrib/restricted/abseil-cpp/absl/memory/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/meta/type_traits.h | 180
-rw-r--r--  contrib/restricted/abseil-cpp/absl/meta/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/numeric/int128.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp/absl/numeric/int128.h | 40
-rw-r--r--  contrib/restricted/abseil-cpp/absl/numeric/int128_have_intrinsic.inc | 14
-rw-r--r--  contrib/restricted/abseil-cpp/absl/numeric/int128_no_intrinsic.inc | 18
-rw-r--r--  contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp/absl/profiling/internal/periodic_sampler.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/random/seed_sequences.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h | 63
-rw-r--r--  contrib/restricted/abseil-cpp/absl/status/status.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/status/status.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp/absl/status/statusor.h | 250
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/ascii.cc | 103
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/cord.cc | 35
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/cord.h | 75
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/escaping.cc | 91
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/escaping.h | 33
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/has_absl_stringify.h | 1
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.h | 10
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h | 38
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.cc | 32
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.h | 36
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle.cc | 48
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc | 19
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.h | 13
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/escaping.cc | 7
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h | 25
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/numbers.cc | 440
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/numbers.h | 163
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/str_cat.cc | 191
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/str_cat.h | 154
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/str_format.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/str_join.h | 24
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/str_split.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/string_view.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp/absl/strings/substitute.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc | 13
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/mutex.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/civil_time.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/clock.cc | 16
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/duration.cc | 111
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/format.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc | 29
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp/absl/time/time.h | 102
-rw-r--r--  contrib/restricted/abseil-cpp/absl/types/compare.h | 505
-rw-r--r--  contrib/restricted/abseil-cpp/absl/types/internal/optional.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/types/internal/variant.h | 122
-rw-r--r--  contrib/restricted/abseil-cpp/absl/types/optional.h | 33
-rw-r--r--  contrib/restricted/abseil-cpp/absl/types/span.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp/absl/types/variant.h | 43
-rw-r--r--  contrib/restricted/abseil-cpp/absl/utility/utility.h | 44
-rw-r--r--  contrib/restricted/abseil-cpp/absl/utility/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp/ya.make | 6
140 files changed, 7858 insertions(+), 2675 deletions(-)
diff --git a/contrib/restricted/abseil-cpp/absl/algorithm/container.h b/contrib/restricted/abseil-cpp/absl/algorithm/container.h
index c7bafae147..6bbe3b5adf 100644
--- a/contrib/restricted/abseil-cpp/absl/algorithm/container.h
+++ b/contrib/restricted/abseil-cpp/absl/algorithm/container.h
@@ -44,6 +44,7 @@
#include <cassert>
#include <iterator>
#include <numeric>
+#include <random>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
@@ -51,6 +52,7 @@
#include <vector>
#include "absl/algorithm/algorithm.h"
+#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/meta/type_traits.h"
@@ -92,17 +94,17 @@ using ContainerPointerType =
// using std::end;
// std::foo(begin(c), end(c));
// becomes
-// std::foo(container_algorithm_internal::begin(c),
-// container_algorithm_internal::end(c));
+// std::foo(container_algorithm_internal::c_begin(c),
+// container_algorithm_internal::c_end(c));
// These are meant for internal use only.
template <typename C>
-ContainerIter<C> c_begin(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_begin(C& c) {
return begin(c);
}
template <typename C>
-ContainerIter<C> c_end(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_end(C& c) {
return end(c);
}
@@ -145,8 +147,9 @@ bool c_linear_search(const C& c, EqualityComparable&& value) {
// Container-based version of the <iterator> `std::distance()` function to
// return the number of elements within a container.
template <typename C>
-container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
- const C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerDifferenceType<const C>
+ c_distance(const C& c) {
return std::distance(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
@@ -210,6 +213,16 @@ container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
std::forward<T>(value));
}
+// c_contains()
+//
+// Container-based version of the <algorithm> `std::ranges::contains()` C++23
+// function to search a container for a value.
+template <typename Sequence, typename T>
+bool c_contains(const Sequence& sequence, T&& value) {
+ return absl::c_find(sequence, std::forward<T>(value)) !=
+ container_algorithm_internal::c_end(sequence);
+}
+
// c_find_if()
//
// Container-based version of the <algorithm> `std::find_if()` function to find
@@ -426,6 +439,26 @@ container_algorithm_internal::ContainerIter<Sequence1> c_search(
std::forward<BinaryPredicate>(pred));
}
+// c_contains_subrange()
+//
+// Container-based version of the <algorithm> `std::ranges::contains_subrange()`
+// C++23 function to search a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+bool c_contains_subrange(Sequence1& sequence, Sequence2& subsequence) {
+ return absl::c_search(sequence, subsequence) !=
+ container_algorithm_internal::c_end(sequence);
+}
+
+// Overload of c_contains_subrange() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+bool c_contains_subrange(Sequence1& sequence, Sequence2& subsequence,
+ BinaryPredicate&& pred) {
+ return absl::c_search(sequence, subsequence,
+ std::forward<BinaryPredicate>(pred)) !=
+ container_algorithm_internal::c_end(sequence);
+}
+
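As a usage sketch of the two algorithms added above (the container names are illustrative, not from the patch):

  #include <vector>
  #include "absl/algorithm/container.h"

  void ContainsExamples() {
    std::vector<int> v = {1, 2, 3, 4};
    std::vector<int> window = {2, 3};
    bool has_value = absl::c_contains(v, 3);                 // true
    bool has_window = absl::c_contains_subrange(v, window);  // true
    (void)has_value;
    (void)has_window;
  }
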
// c_search_n()
//
// Container-based version of the <algorithm> `std::search_n()` function to
@@ -1500,8 +1533,9 @@ c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
// to return an iterator pointing to the element with the smallest value, using
// `operator<` to make the comparisons.
template <typename Sequence>
-container_algorithm_internal::ContainerIter<Sequence> c_min_element(
- Sequence& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerIter<Sequence>
+ c_min_element(Sequence& sequence) {
return std::min_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
@@ -1509,8 +1543,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_min_element(
// Overload of c_min_element() for performing a `comp` comparison other than
// `operator<`.
template <typename Sequence, typename LessThan>
-container_algorithm_internal::ContainerIter<Sequence> c_min_element(
- Sequence& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerIter<Sequence>
+ c_min_element(Sequence& sequence, LessThan&& comp) {
return std::min_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
@@ -1522,8 +1557,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_min_element(
// to return an iterator pointing to the element with the largest value, using
// `operator<` to make the comparisons.
template <typename Sequence>
-container_algorithm_internal::ContainerIter<Sequence> c_max_element(
- Sequence& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerIter<Sequence>
+ c_max_element(Sequence& sequence) {
return std::max_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
@@ -1531,8 +1567,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_max_element(
// Overload of c_max_element() for performing a `comp` comparison other than
// `operator<`.
template <typename Sequence, typename LessThan>
-container_algorithm_internal::ContainerIter<Sequence> c_max_element(
- Sequence& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerIter<Sequence>
+ c_max_element(Sequence& sequence, LessThan&& comp) {
return std::max_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
@@ -1545,8 +1582,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_max_element(
// smallest and largest values, respectively, using `operator<` to make the
// comparisons.
template <typename C>
-container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
- C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerIterPairType<C, C>
+ c_minmax_element(C& c) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
@@ -1554,8 +1592,9 @@ container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
// Overload of c_minmax_element() for performing `comp` comparisons other than
// `operator<`.
template <typename C, typename LessThan>
-container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
- C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+ container_algorithm_internal::ContainerIterPairType<C, C>
+ c_minmax_element(C& c, LessThan&& comp) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
diff --git a/contrib/restricted/abseil-cpp/absl/algorithm/ya.make b/contrib/restricted/abseil-cpp/absl/algorithm/ya.make
index ac5695f072..467248d279 100644
--- a/contrib/restricted/abseil-cpp/absl/algorithm/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/algorithm/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20240116.2)
+VERSION(20240722.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240116.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240722.0.tar.gz)
NO_RUNTIME()
diff --git a/contrib/restricted/abseil-cpp/absl/base/attributes.h b/contrib/restricted/abseil-cpp/absl/base/attributes.h
index d4f67a1295..5ea5ee3ef8 100644
--- a/contrib/restricted/abseil-cpp/absl/base/attributes.h
+++ b/contrib/restricted/abseil-cpp/absl/base/attributes.h
@@ -195,6 +195,9 @@
// ABSL_ATTRIBUTE_NORETURN
//
// Tells the compiler that a given function never returns.
+//
+// Deprecated: Prefer the `[[noreturn]]` attribute standardized by C++11 over
+// this macro.
#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
#elif defined(_MSC_VER)
@@ -702,6 +705,11 @@
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
_Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+ _Pragma("warning(push)") _Pragma("warning(disable: 4996)")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+ _Pragma("warning(pop)")
#else
#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
@@ -808,14 +816,43 @@
//
// See also the upstream documentation:
// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+// https://learn.microsoft.com/en-us/cpp/code-quality/c26816?view=msvc-170
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(msvc::lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[msvc::lifetimebound]]
#elif ABSL_HAVE_ATTRIBUTE(lifetimebound)
#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
#else
#define ABSL_ATTRIBUTE_LIFETIME_BOUND
#endif
+// ABSL_INTERNAL_ATTRIBUTE_VIEW indicates that a type acts like a view, i.e. a
+// raw (non-owning) pointer. This enables diagnoses similar to those enabled by
+// ABSL_ATTRIBUTE_LIFETIME_BOUND.
+//
+// See the following links for details:
+// https://reviews.llvm.org/D64448
+// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
+#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Pointer)
+#define ABSL_INTERNAL_ATTRIBUTE_VIEW [[gsl::Pointer]]
+#else
+#define ABSL_INTERNAL_ATTRIBUTE_VIEW
+#endif
+
+// ABSL_INTERNAL_ATTRIBUTE_OWNER indicates that a type acts like a smart
+// (owning) pointer. This enables diagnoses similar to those enabled by
+// ABSL_ATTRIBUTE_LIFETIME_BOUND.
+//
+// See the following links for details:
+// https://reviews.llvm.org/D64448
+// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
+#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Owner)
+#define ABSL_INTERNAL_ATTRIBUTE_OWNER [[gsl::Owner]]
+#else
+#define ABSL_INTERNAL_ATTRIBUTE_OWNER
+#endif
+
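A hedged sketch of the dangling-reference diagnostics that `ABSL_ATTRIBUTE_LIFETIME_BOUND` (and the `[[gsl::Pointer]]`/`[[gsl::Owner]]` pair above) enable; `FirstOf` is illustrative, not part of the patch:

  #include <string>
  #include <vector>
  #include "absl/base/attributes.h"

  // The annotated argument must outlive the returned reference.
  const std::string& FirstOf(
      const std::vector<std::string>& v ABSL_ATTRIBUTE_LIFETIME_BOUND) {
    return v.front();
  }

  // const std::string& s = FirstOf({"a", "b"});
  // ^ clang warns: the reference would outlive the temporary vector.
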
// ABSL_ATTRIBUTE_TRIVIAL_ABI
// Indicates that a type is "trivially relocatable" -- meaning it can be
// relocated without invoking the constructor/destructor, using a form of move
@@ -871,4 +908,51 @@
#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
#endif
+// ABSL_ATTRIBUTE_UNINITIALIZED
+//
+// GCC and Clang support a flag `-ftrivial-auto-var-init=<option>` (<option>
+// can be "zero" or "pattern") that can be used to initialize automatic stack
+// variables. Variables with this attribute will be left uninitialized,
+// overriding the compiler flag.
+//
+// See https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+// and https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED [[clang::uninitialized]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED [[gnu::uninitialized]]
+#elif ABSL_HAVE_ATTRIBUTE(uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED __attribute__((uninitialized))
+#else
+#define ABSL_ATTRIBUTE_UNINITIALIZED
+#endif
+
+// ABSL_ATTRIBUTE_WARN_UNUSED
+//
+// Compilers routinely warn about trivial variables that are unused. For
+// non-trivial types, this warning is suppressed since the
+// constructor/destructor may be intentional and load-bearing, for example, with
+// a RAII scoped lock.
+//
+// For example:
+//
+// class ABSL_ATTRIBUTE_WARN_UNUSED MyType {
+// public:
+// MyType();
+// ~MyType();
+// };
+//
+// void foo() {
+// // Warns with ABSL_ATTRIBUTE_WARN_UNUSED attribute present.
+// MyType unused;
+// }
+//
+// See https://clang.llvm.org/docs/AttributeReference.html#warn-unused and
+// https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#index-warn_005funused-type-attribute
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::warn_unused)
+#define ABSL_ATTRIBUTE_WARN_UNUSED [[gnu::warn_unused]]
+#else
+#define ABSL_ATTRIBUTE_WARN_UNUSED
+#endif
+
#endif // ABSL_BASE_ATTRIBUTES_H_
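A brief sketch of the new `ABSL_ATTRIBUTE_UNINITIALIZED`, assuming a build that passes `-ftrivial-auto-var-init=pattern`; `FillFromDevice` is a hypothetical reader:

  #include <cstddef>
  #include "absl/base/attributes.h"

  void FillFromDevice(char* buf, std::size_t n);  // hypothetical

  void ReadPacket() {
    // Opt this one buffer out of -ftrivial-auto-var-init: the very next call
    // overwrites it, so forced pattern-initialization would be pure overhead.
    ABSL_ATTRIBUTE_UNINITIALIZED char buf[4096];
    FillFromDevice(buf, sizeof(buf));
  }
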
diff --git a/contrib/restricted/abseil-cpp/absl/base/config.h b/contrib/restricted/abseil-cpp/absl/base/config.h
index cbd9eac58e..51687ebb52 100644
--- a/contrib/restricted/abseil-cpp/absl/base/config.h
+++ b/contrib/restricted/abseil-cpp/absl/base/config.h
@@ -117,8 +117,8 @@
//
// LTS releases can be obtained from
// https://github.com/abseil/abseil-cpp/releases.
-#define ABSL_LTS_RELEASE_VERSION 20240116
-#define ABSL_LTS_RELEASE_PATCH_LEVEL 2
+#define ABSL_LTS_RELEASE_VERSION 20240722
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
// Helper macro to convert a CPP variable to a string literal.
#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
@@ -231,12 +231,11 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
-// We assume __thread is supported on Linux or Asylo when compiled with Clang or
+// We assume __thread is supported on Linux when compiled with Clang or
// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
#ifdef ABSL_HAVE_TLS
#error ABSL_HAVE_TLS cannot be directly set
-#elif (defined(__linux__) || defined(__ASYLO__)) && \
- (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#define ABSL_HAVE_TLS 1
#endif
@@ -275,53 +274,18 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#endif
+
// ABSL_HAVE_THREAD_LOCAL
//
+// DEPRECATED - `thread_local` is available on all supported platforms.
// Checks whether C++11's `thread_local` storage duration specifier is
// supported.
#ifdef ABSL_HAVE_THREAD_LOCAL
#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
-#elif defined(__APPLE__)
-// Notes:
-// * Xcode's clang did not support `thread_local` until version 8, and
-// even then not for all iOS < 9.0.
-// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
-// targeting iOS 9.x.
-// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
-// making ABSL_HAVE_FEATURE unreliable there.
-//
-#if ABSL_HAVE_FEATURE(cxx_thread_local) && \
- !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
-#define ABSL_HAVE_THREAD_LOCAL 1
-#endif
-#else // !defined(__APPLE__)
+#else
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
-// There are platforms for which TLS should not be used even though the compiler
-// makes it seem like it's supported (Android NDK < r12b for example).
-// This is primarily because of linker problems and toolchain misconfiguration:
-// Abseil does not intend to support this indefinitely. Currently, the newest
-// toolchain that we intend to support that requires this behavior is the
-// r11 NDK - allowing for a 5 year support window on that means this option
-// is likely to be removed around June of 2021.
-// TLS isn't supported until NDK r12b per
-// https://developer.android.com/ndk/downloads/revision_history.html
-// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
-// <android/ndk-version.h>. For NDK < r16, users should define these macros,
-// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11.
-#if defined(__ANDROID__) && defined(__clang__)
-#if __has_include(<android/ndk-version.h>)
-#include <android/ndk-version.h>
-#endif // __has_include(<android/ndk-version.h>)
-#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
- defined(__NDK_MINOR__) && \
- ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
-#undef ABSL_HAVE_TLS
-#undef ABSL_HAVE_THREAD_LOCAL
-#endif
-#endif // defined(__ANDROID__) && defined(__clang__)
-
// ABSL_HAVE_INTRINSIC_INT128
//
// Checks whether the __int128 compiler extension for a 128-bit integral type is
@@ -379,9 +343,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_EXCEPTIONS 1
#endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
// Handle remaining special cases and default to exceptions being supported.
-#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \
- !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \
- !defined(__cpp_exceptions)) && \
+#elif !(defined(__GNUC__) && !defined(__cpp_exceptions)) && \
!(defined(_MSC_VER) && !defined(_CPPUNWIND))
#define ABSL_HAVE_EXCEPTIONS 1
#endif
@@ -416,9 +378,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(_AIX) || defined(__ros__) || defined(__native_client__) || \
defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \
- defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
- defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
- defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
+ defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) || \
+ defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) || \
+ defined(__VXWORKS__) || defined(__hexagon__)
#define ABSL_HAVE_MMAP 1
#endif
@@ -902,9 +864,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set
#elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__))
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0
-#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && \
- (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \
- !defined(__mips__)
+#elif defined(__GNUC__)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
#elif defined(__clang__) && !defined(_MSC_VER)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
@@ -981,6 +941,27 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_CONSTANT_EVALUATED 1
#endif
+// ABSL_INTERNAL_CONSTEXPR_SINCE_CXXYY is used to conditionally define constexpr
+// for different C++ versions.
+//
+// These macros are an implementation detail and will be unconditionally removed
+// once the minimum supported C++ version catches up to a given version.
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 constexpr
+#else
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+#endif
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 constexpr
+#else
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+#endif
+
// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
// into an integer that can be compared against.
#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
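In C++17 mode these macros expand to `constexpr`, which is what makes the container-algorithm changes earlier in this commit usable in constant expressions. A minimal sketch, assuming a C++17 build:

  #include <array>
  #include "absl/algorithm/container.h"

  constexpr std::array<int, 3> kArr = {1, 2, 3};
  // absl::c_distance() is declared ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17, so in
  // C++17 mode it can be evaluated at compile time.
  static_assert(absl::c_distance(kArr) == 3, "three elements");
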
diff --git a/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h b/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h
index 7ba8912ea6..f18b5e0a8f 100644
--- a/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h
+++ b/contrib/restricted/abseil-cpp/absl/base/dynamic_annotations.h
@@ -252,25 +252,9 @@ ABSL_INTERNAL_END_EXTERN_C
#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
-// TODO(rogeeff): remove this branch
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
- do { \
- (void)(address); \
- (void)(size); \
- } while (0)
-#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
- do { \
- (void)(address); \
- (void)(size); \
- } while (0)
-#else
-
#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
-#endif
-
#endif // ABSL_HAVE_MEMORY_SANITIZER
// -------------------------------------------------------------------------
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h b/contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h
index 36e1b33d65..03fa2434ce 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/nullability_impl.h
@@ -19,10 +19,11 @@
#include <type_traits>
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
namespace absl {
-
+ABSL_NAMESPACE_BEGIN
namespace nullability_internal {
// `IsNullabilityCompatible` checks whether its first argument is a class
@@ -101,6 +102,7 @@ using NullabilityUnknownImpl
= T;
} // namespace nullability_internal
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/poison.cc b/contrib/restricted/abseil-cpp/absl/base/internal/poison.cc
new file mode 100644
index 0000000000..b33d4c2d3d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/poison.cc
@@ -0,0 +1,84 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/poison.h"
+
+#include <cstdlib>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
+#include <sanitizer/asan_interface.h>
+#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
+#include <sanitizer/msan_interface.h>
+#elif defined(ABSL_HAVE_MMAP)
+#include <sys/mman.h>
+#endif
+
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+size_t GetPageSize() {
+#ifdef _WIN32
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ return system_info.dwPageSize;
+#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
+ return getpagesize();
+#else
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+#endif
+}
+
+} // namespace
+
+void* InitializePoisonedPointerInternal() {
+ const size_t block_size = GetPageSize();
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
+ void* data = malloc(block_size);
+ ASAN_POISON_MEMORY_REGION(data, block_size);
+#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
+ void* data = malloc(block_size);
+ __msan_poison(data, block_size);
+#elif defined(ABSL_HAVE_MMAP)
+ void* data = DirectMmap(nullptr, block_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (data == MAP_FAILED) return GetBadPointerInternal();
+#elif defined(_WIN32)
+ void* data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT,
+ PAGE_NOACCESS);
+ if (data == nullptr) return GetBadPointerInternal();
+#else
+ return GetBadPointerInternal();
+#endif
+ // Return the middle of the block so that dereferences before and after the
+ // pointer will both crash.
+ return static_cast<char*>(data) + block_size / 2;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/poison.h b/contrib/restricted/abseil-cpp/absl/base/internal/poison.h
new file mode 100644
index 0000000000..28113bdd71
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/poison.h
@@ -0,0 +1,59 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_POISON_H_
+#define ABSL_BASE_INTERNAL_POISON_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void* GetBadPointerInternal() {
+ // A likely bad pointer. Pointers are required to have high bits that are all
+ // zero or all one for certain 64-bit CPUs. This pointer value will hopefully
+ // cause a crash on dereference and also be clearly recognizable as invalid.
+ constexpr uint64_t kBadPtr = 0xBAD0BAD0BAD0BAD0;
+ auto ret = reinterpret_cast<void*>(static_cast<uintptr_t>(kBadPtr));
+#ifndef _MSC_VER // MSVC doesn't support inline asm with `volatile`.
+ // Try to prevent the compiler from optimizing out the undefined behavior.
+ asm volatile("" : : "r"(ret) :); // NOLINT
+#endif
+ return ret;
+}
+
+void* InitializePoisonedPointerInternal();
+
+inline void* get_poisoned_pointer() {
+#if defined(NDEBUG) && !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+ !defined(ABSL_HAVE_MEMORY_SANITIZER)
+ // In optimized non-sanitized builds, avoid the function-local static because
+ // of the codegen and runtime cost.
+ return GetBadPointerInternal();
+#else
+  // Non-optimized builds may use a more robust implementation. Note that we
+  // can't use a static global because Chromium doesn't allow non-constinit
+  // globals.
+ static void* ptr = InitializePoisonedPointerInternal();
+ return ptr;
+#endif
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_POISON_H_
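The mechanism: the returned pointer lands in the middle of a page that is ASan/MSan-poisoned or mapped `PROT_NONE`, so dereferences both before and after it fault. A hedged sketch of how such a pointer might be used; `Handle` is illustrative, and this is Abseil-internal API:

  #include "absl/base/internal/poison.h"

  struct Handle {
    int* data;
    // Park the pointer on the poisoned page after teardown so a stale
    // dereference crashes immediately instead of reading freed memory.
    void Invalidate() {
      data = static_cast<int*>(absl::base_internal::get_poisoned_pointer());
    }
  };
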
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
index 2929cd6f5c..1bb260f46b 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
@@ -53,7 +53,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
-class ABSL_LOCKABLE SpinLock {
+class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
public:
SpinLock() : lockword_(kSpinLockCooperative) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
@@ -89,7 +89,8 @@ class ABSL_LOCKABLE SpinLock {
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
- inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ ABSL_MUST_USE_RESULT inline bool TryLock()
+ ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
@@ -120,7 +121,7 @@ class ABSL_LOCKABLE SpinLock {
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
- inline bool IsHeld() const {
+ ABSL_MUST_USE_RESULT inline bool IsHeld() const {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}
@@ -202,6 +203,15 @@ class ABSL_LOCKABLE SpinLock {
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
+//
+// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
+// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] SpinLockHolder;
+#else
+class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
+#endif
+
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
public:
inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
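With `ABSL_MUST_USE_RESULT` on `TryLock()` and `[[nodiscard]]` on the holder, an ignored result now draws a warning. A sketch (Abseil-internal API, shown only to illustrate the annotations):

  #include "absl/base/internal/spinlock.h"

  absl::base_internal::SpinLock lock;

  void TryWork() {
    if (lock.TryLock()) {  // result is consumed, no warning
      // ... critical section ...
      lock.Unlock();
    }
    // lock.TryLock();  // would now warn: unused result, lock silently leaked
  }
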
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
index 05e0e7ba5e..a0bf3a65f7 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
@@ -121,18 +121,6 @@ double UnscaledCycleClock::Frequency() {
return aarch64_timer_frequency;
}
-#elif defined(__riscv)
-
-int64_t UnscaledCycleClock::Now() {
- int64_t virtual_timer_value;
- asm volatile("rdcycle %0" : "=r"(virtual_timer_value));
- return virtual_timer_value;
-}
-
-double UnscaledCycleClock::Frequency() {
- return base_internal::NominalCPUFrequency();
-}
-
#elif defined(_M_IX86) || defined(_M_X64)
#pragma intrinsic(__rdtsc)
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h
index 24b324ac99..43a3dabeea 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h
@@ -21,8 +21,8 @@
// The following platforms have an implementation of a hardware counter.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
- defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
+ defined(__powerpc__) || defined(__ppc__) || defined(_M_IX86) || \
+ (defined(_M_X64) && !defined(_M_ARM64EC))
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
#else
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
@@ -53,8 +53,8 @@
#if ABSL_USE_UNSCALED_CYCLECLOCK
// This macro can be used to test if UnscaledCycleClock::Frequency()
// is NominalCPUFrequency() on a particular platform.
-#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
- defined(_M_IX86) || defined(_M_X64))
+#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || \
+ defined(_M_X64))
#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
#endif
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/base/macros.h b/contrib/restricted/abseil-cpp/absl/base/macros.h
index f33cd1927b..b318f11664 100644
--- a/contrib/restricted/abseil-cpp/absl/base/macros.h
+++ b/contrib/restricted/abseil-cpp/absl/base/macros.h
@@ -138,4 +138,52 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_RETHROW do {} while (false)
#endif // ABSL_HAVE_EXCEPTIONS
+// ABSL_DEPRECATE_AND_INLINE()
+//
+// Marks a function or type alias as deprecated and tags it to be picked up for
+// automated refactoring by go/cpp-inliner. It can added to inline function
+// definitions or type aliases. It should only be used within a header file. It
+// differs from `ABSL_DEPRECATED` in the following ways:
+//
+// 1. New uses of the function or type will be discouraged via Tricorder
+// warnings.
+// 2. If enabled via `METADATA`, automated changes will be sent out inlining the
+// function's body or replacing the type where it is used.
+//
+// For example:
+//
+// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) {
+// return NewFunc(x, 0);
+// }
+//
+// will mark `OldFunc` as deprecated, and the go/cpp-inliner service will
+// replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)`. Once all calls
+// to `OldFunc` have been replaced, `OldFunc` can be deleted.
+//
+// See go/cpp-inliner for more information.
+//
+// Note: go/cpp-inliner is a Google-internal service for automated refactoring.
+// While open-source users do not have access to this service, the macro is
+// provided for compatibility, and so that users receive deprecation warnings.
+#if ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && \
+ ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+#define ABSL_DEPRECATE_AND_INLINE() [[deprecated, clang::annotate("inline-me")]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated)
+#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]]
+#else
+#define ABSL_DEPRECATE_AND_INLINE()
+#endif
+
+// Requires the compiler to prove that the size of the given object is at least
+// the expected amount.
+#if ABSL_HAVE_ATTRIBUTE(diagnose_if) && ABSL_HAVE_BUILTIN(__builtin_object_size)
+#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N) \
+ __attribute__((diagnose_if(__builtin_object_size(Obj, 0) < N, \
+ "object size provably too small " \
+ "(this would corrupt memory)", \
+ "error")))
+#else
+#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N)
+#endif
+
#endif // ABSL_BASE_MACROS_H_
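A hedged sketch of the new internal macro: under clang, `diagnose_if` plus `__builtin_object_size` turns a provably undersized argument into a compile-time error. `WriteHeader` is illustrative:

  #include "absl/base/macros.h"

  // Callers must pass a buffer of at least 16 bytes.
  void WriteHeader(void* buf) ABSL_INTERNAL_NEED_MIN_SIZE(buf, 16);

  void Caller() {
    char big[32];
    WriteHeader(big);  // OK
    // char tiny[8];
    // WriteHeader(tiny);  // clang error: "object size provably too small"
  }
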
diff --git a/contrib/restricted/abseil-cpp/absl/base/no_destructor.h b/contrib/restricted/abseil-cpp/absl/base/no_destructor.h
index d4b16a6e88..43b3540aeb 100644
--- a/contrib/restricted/abseil-cpp/absl/base/no_destructor.h
+++ b/contrib/restricted/abseil-cpp/absl/base/no_destructor.h
@@ -21,14 +21,13 @@
// such an object survives during program exit (and can be safely accessed at
// any time).
//
-// Objects of such type, if constructed safely and under the right conditions,
-// provide two main benefits over other alternatives:
-//
-// * Global objects not normally allowed due to concerns of destruction order
-// (i.e. no "complex globals") can be safely allowed, provided that such
-// objects can be constant initialized.
-// * Function scope static objects can be optimized to avoid heap allocation,
-// pointer chasing, and allow lazy construction.
+// absl::NoDestructor<T> is useful when a variable has static storage
+// duration but its type has a non-trivial destructor. Global constructors are
+// not recommended because of C++'s static initialization order fiasco (see
+// https://en.cppreference.com/w/cpp/language/siof). Global destructors are not
+// allowed due to similar concerns about destruction ordering. Using
+// absl::NoDestructor<T> as a function-local static prevents both of these
+// issues.
//
// See below for complete details.
@@ -41,6 +40,7 @@
#include <utility>
#include "absl/base/config.h"
+#include "absl/base/nullability.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -49,8 +49,8 @@ ABSL_NAMESPACE_BEGIN
//
// NoDestructor<T> is a wrapper around an object of type T that behaves as an
// object of type T but never calls T's destructor. NoDestructor<T> makes it
-// safer and/or more efficient to use such objects in static storage contexts:
-// as global or function scope static variables.
+// safer and/or more efficient to use such objects in static storage contexts,
+// ideally as function scope static variables.
//
// An instance of absl::NoDestructor<T> has similar type semantics to an
// instance of T:
@@ -61,9 +61,6 @@ ABSL_NAMESPACE_BEGIN
// `->`, `*`, and `get()`.
// (Note that `const NoDestructor<T>` works like a pointer to const `T`.)
//
-// An object of type NoDestructor<T> should be defined in static storage:
-// as either a global static object, or as a function scope static variable.
-//
// Additionally, NoDestructor<T> provides the following benefits:
//
// * Never calls T's destructor for the object
@@ -71,24 +68,7 @@ ABSL_NAMESPACE_BEGIN
// lazily constructed.
//
// An object of type NoDestructor<T> is "trivially destructible" in the notion
-// that its destructor is never run. Provided that an object of this type can be
-// safely initialized and does not need to be cleaned up on program shutdown,
-// NoDestructor<T> allows you to define global static variables, since Google's
-// C++ style guide ban on such objects doesn't apply to objects that are
-// trivially destructible.
-//
-// Usage as Global Static Variables
-//
-// NoDestructor<T> allows declaration of a global object with a non-trivial
-// constructor in static storage without needing to add a destructor.
-// However, such objects still need to worry about initialization order, so
-// such objects should be const initialized:
-//
-// // Global or namespace scope.
-// ABSL_CONST_INIT absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
-//
-// Note that if your object already has a trivial destructor, you don't need to
-// use NoDestructor<T>.
+// that its destructor is never run.
//
// Usage as Function Scope Static Variables
//
@@ -114,6 +94,21 @@ ABSL_NAMESPACE_BEGIN
// return *x;
// }
//
+// Usage as Global Static Variables
+//
+// NoDestructor<T> allows declaration of a global object of type T that has a
+// non-trivial destructor since its destructor is never run. However, such
+// objects still need to worry about initialization order, so such use is not
+// recommended, strongly discouraged by the Google C++ Style Guide, and outright
+// banned in Chromium.
+// See https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
+//
+// // Global or namespace scope.
+// absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
+//
+// Note that if your object already has a trivial destructor, you don't need to
+// use NoDestructor<T>.
+//
template <typename T>
class NoDestructor {
public:
@@ -140,11 +135,11 @@ class NoDestructor {
// Pretend to be a smart pointer to T with deep constness.
// Never returns a null pointer.
T& operator*() { return *get(); }
- T* operator->() { return get(); }
- T* get() { return impl_.get(); }
+ absl::Nonnull<T*> operator->() { return get(); }
+ absl::Nonnull<T*> get() { return impl_.get(); }
const T& operator*() const { return *get(); }
- const T* operator->() const { return get(); }
- const T* get() const { return impl_.get(); }
+ absl::Nonnull<const T*> operator->() const { return get(); }
+ absl::Nonnull<const T*> get() const { return impl_.get(); }
private:
class DirectImpl {
@@ -152,8 +147,8 @@ class NoDestructor {
template <typename... Args>
explicit constexpr DirectImpl(Args&&... args)
: value_(std::forward<Args>(args)...) {}
- const T* get() const { return &value_; }
- T* get() { return &value_; }
+ absl::Nonnull<const T*> get() const { return &value_; }
+ absl::Nonnull<T*> get() { return &value_; }
private:
T value_;
@@ -165,14 +160,14 @@ class NoDestructor {
explicit PlacementImpl(Args&&... args) {
new (&space_) T(std::forward<Args>(args)...);
}
- const T* get() const {
+ absl::Nonnull<const T*> get() const {
return Launder(reinterpret_cast<const T*>(&space_));
}
- T* get() { return Launder(reinterpret_cast<T*>(&space_)); }
+ absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); }
private:
template <typename P>
- static P* Launder(P* p) {
+ static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) {
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
return std::launder(p);
#elif ABSL_HAVE_BUILTIN(__builtin_launder)
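Net effect of the nullability annotations on this class, as a usage sketch: the accessors now advertise a never-null contract.

  #include <string>
  #include "absl/base/no_destructor.h"
  #include "absl/base/nullability.h"

  const std::string& Greeting() {
    static const absl::NoDestructor<std::string> s("hello");
    return *s;  // never null; the destructor is never run
  }

  absl::Nonnull<const std::string*> GreetingPtr() {
    static const absl::NoDestructor<std::string> s("hello");
    return s.get();  // get() is now annotated absl::Nonnull<const T*>
  }
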
diff --git a/contrib/restricted/abseil-cpp/absl/base/nullability.h b/contrib/restricted/abseil-cpp/absl/base/nullability.h
index 6f49b6f535..34dc083a4e 100644
--- a/contrib/restricted/abseil-cpp/absl/base/nullability.h
+++ b/contrib/restricted/abseil-cpp/absl/base/nullability.h
@@ -128,9 +128,17 @@
//
// By default, nullability annotations are applicable to raw and smart
// pointers. User-defined types can indicate compatibility with nullability
-// annotations by providing an `absl_nullability_compatible` nested type. The
-// actual definition of this inner type is not relevant as it is used merely as
-// a marker. It is common to use a using declaration of
+// annotations by adding the ABSL_NULLABILITY_COMPATIBLE attribute.
+//
+// // Example:
+// struct ABSL_NULLABILITY_COMPATIBLE MyPtr {
+// ...
+// };
+//
+// Note: For the time being, nullability-compatible classes should additionally
+// be marked with an `absl_nullability_compatible` nested type (this will soon
+// be deprecated). The actual definition of this inner type is not relevant as
+// it is used merely as a marker. It is common to use a using declaration of
// `absl_nullability_compatible` set to void.
//
// // Example:
@@ -150,14 +158,16 @@
#ifndef ABSL_BASE_NULLABILITY_H_
#define ABSL_BASE_NULLABILITY_H_
+#include "absl/base/config.h"
#include "absl/base/internal/nullability_impl.h"
namespace absl {
+ABSL_NAMESPACE_BEGIN
// absl::Nonnull
//
// The indicated pointer is never null. It is the responsibility of the provider
-// of this pointer across an API boundary to ensure that the pointer is never be
+// of this pointer across an API boundary to ensure that the pointer is never
// set to null. Consumers of this pointer across an API boundary may safely
// dereference the pointer.
//
@@ -198,9 +208,9 @@ using Nullable = nullability_internal::NullableImpl<T>;
// migrated into one of the above two nullability states: `Nonnull<T>` or
// `Nullable<T>`.
//
-// NOTE: Because this annotation is the global default state, pointers without
-// any annotation are assumed to have "unknown" semantics. This assumption is
-// designed to minimize churn and reduce clutter within the codebase.
+// NOTE: Because this annotation is the global default state, unannotated
+// pointers are assumed to have "unknown" semantics. This assumption is designed
+// to minimize churn and reduce clutter within the codebase.
//
// Example:
//
@@ -219,6 +229,22 @@ using Nullable = nullability_internal::NullableImpl<T>;
template <typename T>
using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
+ABSL_NAMESPACE_END
} // namespace absl
+// ABSL_NULLABILITY_COMPATIBLE
+//
+// Indicates that a class is compatible with nullability annotations.
+//
+// For example:
+//
+// struct ABSL_NULLABILITY_COMPATIBLE MyPtr {
+// ...
+// };
+#if ABSL_HAVE_FEATURE(nullability_on_classes)
+#define ABSL_NULLABILITY_COMPATIBLE _Nullable
+#else
+#define ABSL_NULLABILITY_COMPATIBLE
+#endif
+
#endif // ABSL_BASE_NULLABILITY_H_
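For orientation, the aliases now wrapped in the ABSL inline namespace are used like this (function and type names are illustrative):

  #include "absl/base/nullability.h"

  struct Employee;

  // Never null: callers may dereference without checking.
  void PayEmployee(absl::Nonnull<Employee*> employee);

  // May be null: callers must check before dereferencing.
  absl::Nullable<Employee*> FindById(int id);

  // Migration not yet decided; the default state for unannotated code.
  absl::NullabilityUnknown<Employee*> LegacyLookup(int id);
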
diff --git a/contrib/restricted/abseil-cpp/absl/base/optimization.h b/contrib/restricted/abseil-cpp/absl/base/optimization.h
index f985995899..3aa66e1c7d 100644
--- a/contrib/restricted/abseil-cpp/absl/base/optimization.h
+++ b/contrib/restricted/abseil-cpp/absl/base/optimization.h
@@ -18,12 +18,23 @@
// -----------------------------------------------------------------------------
//
// This header file defines portable macros for performance optimization.
+//
+// This header is included in both C++ code and legacy C code and thus must
+// remain compatible with both C and C++. C compatibility will be removed if
+// the legacy code is removed or converted to C++. Do not include this header in
+// new code that requires C compatibility or assume C compatibility will remain
+// indefinitely.
#ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_
#include <assert.h>
+#ifdef __cplusplus
+// Included for std::unreachable()
+#include <utility>
+#endif // __cplusplus
+
#include "absl/base/config.h"
#include "absl/base/options.h"
diff --git a/contrib/restricted/abseil-cpp/absl/base/options.h b/contrib/restricted/abseil-cpp/absl/base/options.h
index d7bf8cffe6..61a996b6c8 100644
--- a/contrib/restricted/abseil-cpp/absl/base/options.h
+++ b/contrib/restricted/abseil-cpp/absl/base/options.h
@@ -226,7 +226,7 @@
// allowed.
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20240116
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20240722
// ABSL_OPTION_HARDENED
//
diff --git a/contrib/restricted/abseil-cpp/absl/base/prefetch.h b/contrib/restricted/abseil-cpp/absl/base/prefetch.h
index eb40a4453c..482cde3a01 100644
--- a/contrib/restricted/abseil-cpp/absl/base/prefetch.h
+++ b/contrib/restricted/abseil-cpp/absl/base/prefetch.h
@@ -129,7 +129,7 @@ void PrefetchToLocalCacheNta(const void* addr);
//
// void* Arena::Allocate(size_t size) {
// void* ptr = AllocateBlock(size);
-// absl::PrefetchToLocalCacheForWrite(p);
+// absl::PrefetchToLocalCacheForWrite(ptr);
// return ptr;
// }
//
diff --git a/contrib/restricted/abseil-cpp/absl/base/ya.make b/contrib/restricted/abseil-cpp/absl/base/ya.make
index 70ca844324..b19ed93c5d 100644
--- a/contrib/restricted/abseil-cpp/absl/base/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/base/ya.make
@@ -21,6 +21,7 @@ NO_UTIL()
SRCS(
internal/cycleclock.cc
internal/low_level_alloc.cc
+ internal/poison.cc
internal/raw_logging.cc
internal/scoped_set_env.cc
internal/spinlock.cc
diff --git a/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h
index acd013b026..ebd9ed6756 100644
--- a/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/flat_hash_map.h
@@ -26,21 +26,24 @@
//
// In most cases, your default choice for a hash map should be a map of type
// `flat_hash_map`.
+//
+// `flat_hash_map` is not exception-safe.
#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
#include <cstddef>
-#include <new>
+#include <memory>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
+#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
-#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -62,7 +65,7 @@ struct FlatHashMapPolicy;
// * Requires values that are MoveConstructible
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
// `insert()`, provided that the map is provided a compatible heterogeneous
-// hashing function and equality operator.
+// hashing function and equality operator. See below for details.
// * Invalidates any references and pointers to elements within the table after
// `rehash()` and when the table is moved.
// * Contains a `capacity()` member function indicating the number of element
@@ -80,6 +83,19 @@ struct FlatHashMapPolicy;
// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
// be randomized across dynamically loaded libraries.
//
+// To achieve heterogeneous lookup for custom types, either the `Hash` and
+// `Eq` type parameters can be specified explicitly, or `T` should have public
+// inner types `absl_container_hash` and (optionally) `absl_container_eq`. In
+// either case, `typename Hash::is_transparent` and
+// `typename Eq::is_transparent` should be well-formed. Both types are
+// essentially functors:
+// * `Hash` should support `size_t operator()(U val) const`, returning a hash
+// for the given `val`.
+// * `Eq` should support `bool operator()(U lhs, V rhs) const`, returning true
+// if `lhs` is equal to `rhs`.
+//
+// In most cases `T` needs to provide only `absl_container_hash`; in that case
+// `std::equal_to<void>` will be used as the `Eq` part (see the sketch after
+// this hunk).
+//
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
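A minimal sketch of the inner-types route described above, assuming only the documented contract; the `UserId` type, its nested functors, and `Demo` are illustrative, not part of the patch:

    #include <cstddef>
    #include <string>

    #include "absl/container/flat_hash_map.h"
    #include "absl/hash/hash.h"
    #include "absl/strings/string_view.h"

    struct UserId {
      std::string name;

      struct Hash {
        using is_transparent = void;  // enables heterogeneous lookup
        size_t operator()(absl::string_view v) const {
          return absl::Hash<absl::string_view>{}(v);
        }
        size_t operator()(const UserId& u) const { return (*this)(u.name); }
      };
      struct Eq {
        using is_transparent = void;
        template <class A, class B>
        bool operator()(const A& a, const B& b) const {
          return Name(a) == Name(b);
        }
        static absl::string_view Name(const UserId& u) { return u.name; }
        static absl::string_view Name(absl::string_view v) { return v; }
      };
      using absl_container_hash = Hash;
      using absl_container_eq = Eq;
    };

    // The map picks Hash/Eq up from the inner types; the find() below does
    // not construct a temporary UserId.
    void Demo() {
      absl::flat_hash_map<UserId, int> m;
      m[UserId{"bob"}] = 1;
      auto it = m.find(absl::string_view("bob"));
      (void)it;
    }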
@@ -106,13 +122,13 @@ struct FlatHashMapPolicy;
// if (result != ducks.end()) {
// std::cout << "Result: " << result->second << std::endl;
// }
-template <class K, class V,
- class Hash = absl::container_internal::hash_default_hash<K>,
- class Eq = absl::container_internal::hash_default_eq<K>,
+template <class K, class V, class Hash = DefaultHashContainerHash<K>,
+ class Eq = DefaultHashContainerEq<K>,
class Allocator = std::allocator<std::pair<const K, V>>>
-class flat_hash_map : public absl::container_internal::raw_hash_map<
- absl::container_internal::FlatHashMapPolicy<K, V>,
- Hash, Eq, Allocator> {
+class ABSL_INTERNAL_ATTRIBUTE_OWNER flat_hash_map
+ : public absl::container_internal::raw_hash_map<
+ absl::container_internal::FlatHashMapPolicy<K, V>, Hash, Eq,
+ Allocator> {
using Base = typename flat_hash_map::raw_hash_map;
public:
@@ -560,6 +576,38 @@ typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
namespace container_internal {
+// c_for_each_fast(flat_hash_map<>, Function)
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+// There are no guarantees on the order of the function calls.
+// Erasing or inserting elements in the function is not allowed (see the
+// usage sketch after this hunk).
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Function>
+decay_t<Function> c_for_each_fast(const flat_hash_map<K, V, H, E, A>& c,
+ Function&& f) {
+ container_internal::ForEach(f, &c);
+ return f;
+}
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_map<K, V, H, E, A>& c,
+ Function&& f) {
+ container_internal::ForEach(f, &c);
+ return f;
+}
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_map<K, V, H, E, A>&& c,
+ Function&& f) {
+ container_internal::ForEach(f, &c);
+ return f;
+}
+
+} // namespace container_internal
+
+namespace container_internal {
+
template <class K, class V>
struct FlatHashMapPolicy {
using slot_policy = container_internal::map_slot_policy<K, V>;
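A hypothetical call site for the new helper; `SumValues` is illustrative and assumes the internal namespace is reachable from the example's context:

    #include <cstdint>
    #include <utility>

    #include "absl/container/flat_hash_map.h"

    int64_t SumValues(const absl::flat_hash_map<int, int>& m) {
      int64_t sum = 0;
      // Visitation order is unspecified; the callback must not insert or
      // erase elements.
      absl::container_internal::c_for_each_fast(
          m, [&sum](const std::pair<const int, int>& kv) { sum += kv.second; });
      return sum;
    }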
@@ -573,9 +621,10 @@ struct FlatHashMapPolicy {
slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
}
+ // Returns std::true_type if destroy is trivial.
template <class Allocator>
- static void destroy(Allocator* alloc, slot_type* slot) {
- slot_policy::destroy(alloc, slot);
+ static auto destroy(Allocator* alloc, slot_type* slot) {
+ return slot_policy::destroy(alloc, slot);
}
template <class Allocator>
@@ -592,6 +641,13 @@ struct FlatHashMapPolicy {
std::forward<Args>(args)...);
}
+ template <class Hash>
+ static constexpr HashSlotFn get_hash_slot_fn() {
+ return memory_internal::IsLayoutCompatible<K, V>::value
+ ? &TypeErasedApplyToSlotFn<Hash, K>
+ : nullptr;
+ }
+
static size_t space_used(const slot_type*) { return 0; }
static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
diff --git a/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
index a94a82a0bf..a3e36e05e3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/flat_hash_set.h
@@ -26,18 +26,25 @@
//
// In most cases, your default choice for a hash set should be a set of type
// `flat_hash_set`.
+//
+// `flat_hash_set` is not exception-safe.
+
#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
#define ABSL_CONTAINER_FLAT_HASH_SET_H_
+#include <cstddef>
+#include <memory>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
+#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -58,7 +65,7 @@ struct FlatHashSetPolicy;
// * Requires keys that are CopyConstructible
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
// that the set is provided a compatible heterogeneous hashing function and
-// equality operator.
+// equality operator. See below for details.
// * Invalidates any references and pointers to elements within the table after
// `rehash()` and when the table is moved.
// * Contains a `capacity()` member function indicating the number of element
@@ -76,6 +83,19 @@ struct FlatHashSetPolicy;
// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
// be randomized across dynamically loaded libraries.
//
+// To achieve heterogeneous lookup for custom types, either the `Hash` and
+// `Eq` type parameters can be specified explicitly, or `T` should have public
+// inner types `absl_container_hash` and (optionally) `absl_container_eq`. In
+// either case, `typename Hash::is_transparent` and
+// `typename Eq::is_transparent` should be well-formed. Both types are
+// essentially functors:
+// * `Hash` should support `size_t operator()(U val) const`, returning a hash
+// for the given `val`.
+// * `Eq` should support `bool operator()(U lhs, V rhs) const`, returning true
+// if `lhs` is equal to `rhs`.
+//
+// In most cases `T` needs to provide only `absl_container_hash`; in that case
+// `std::equal_to<void>` will be used as the `Eq` part (see the sketch after
+// this hunk).
+//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
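A minimal sketch of the hash-only case, assuming equality falls back to `std::equal_to<void>` as documented; the `Token` type is illustrative:

    #include <cstddef>
    #include <string>

    #include "absl/container/flat_hash_set.h"
    #include "absl/hash/hash.h"

    struct Token {
      std::string text;

      // std::equal_to<void> will compare Tokens through this operator.
      friend bool operator==(const Token& a, const Token& b) {
        return a.text == b.text;
      }

      struct Hash {
        using is_transparent = void;  // required to be well-formed
        size_t operator()(const Token& t) const {
          return absl::Hash<std::string>{}(t.text);
        }
      };
      using absl_container_hash = Hash;  // no absl_container_eq provided
    };

    // absl::flat_hash_set<Token> now works with no explicit Hash/Eq arguments.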
@@ -99,10 +119,10 @@ struct FlatHashSetPolicy;
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
-template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
- class Eq = absl::container_internal::hash_default_eq<T>,
+template <class T, class Hash = DefaultHashContainerHash<T>,
+ class Eq = DefaultHashContainerEq<T>,
class Allocator = std::allocator<T>>
-class flat_hash_set
+class ABSL_INTERNAL_ATTRIBUTE_OWNER flat_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
using Base = typename flat_hash_set::raw_hash_set;
@@ -460,6 +480,33 @@ typename flat_hash_set<T, H, E, A>::size_type erase_if(
namespace container_internal {
+// c_for_each_fast(flat_hash_set<>, Function)
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+// There are no guarantees on the order of the function calls.
+// Erasing or inserting elements in the function is not allowed.
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(const flat_hash_set<T, H, E, A>& c,
+ Function&& f) {
+ container_internal::ForEach(f, &c);
+ return f;
+}
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>& c, Function&& f) {
+ container_internal::ForEach(f, &c);
+ return f;
+}
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>&& c, Function&& f) {
+ container_internal::ForEach(f, &c);
+ return f;
+}
+
+} // namespace container_internal
+
+namespace container_internal {
+
template <class T>
struct FlatHashSetPolicy {
using slot_type = T;
@@ -473,9 +520,11 @@ struct FlatHashSetPolicy {
std::forward<Args>(args)...);
}
+ // Returns std::true_type if destroy is trivial.
template <class Allocator>
- static void destroy(Allocator* alloc, slot_type* slot) {
+ static auto destroy(Allocator* alloc, slot_type* slot) {
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+ return IsDestructionTrivial<Allocator, slot_type>();
}
static T& element(slot_type* slot) { return *slot; }
@@ -489,6 +538,11 @@ struct FlatHashSetPolicy {
}
static size_t space_used(const T*) { return 0; }
+
+ template <class Hash>
+ static constexpr HashSlotFn get_hash_slot_fn() {
+ return &TypeErasedApplyToSlotFn<Hash, T>;
+ }
};
} // namespace container_internal
diff --git a/contrib/restricted/abseil-cpp/absl/container/hash_container_defaults.h b/contrib/restricted/abseil-cpp/absl/container/hash_container_defaults.h
new file mode 100644
index 0000000000..eb944a7c81
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/container/hash_container_defaults.h
@@ -0,0 +1,45 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
+#define ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
+
+#include "absl/base/config.h"
+#include "absl/container/internal/hash_function_defaults.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// DefaultHashContainerHash is a convenience alias for the functor that Abseil
+// hash-based (unordered) containers use for hashing by default, when the
+// `Hash` type argument is not explicitly specified.
+//
+// This type alias can be used by generic code that wants to provide more
+// flexibility for defining underlying containers.
+template <typename T>
+using DefaultHashContainerHash = absl::container_internal::hash_default_hash<T>;
+
+// DefaultHashContainerEq is a convenience alias for the functor that Abseil
+// hash-based (unordered) containers use for equality checks by default, when
+// the `Eq` type argument is not explicitly specified.
+//
+// This type alias can be used by generic code that wants to provide more
+// flexibility for defining underlying containers.
+template <typename T>
+using DefaultHashContainerEq = absl::container_internal::hash_default_eq<T>;
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
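A sketch of how generic code might forward these defaults; the `TransparentSet` alias is illustrative:

    #include "absl/container/flat_hash_set.h"
    #include "absl/container/hash_container_defaults.h"

    // Spells out the library defaults explicitly, so a wrapper template can
    // expose exactly the same behavior as the underlying container.
    template <typename T>
    using TransparentSet =
        absl::flat_hash_set<T, absl::DefaultHashContainerHash<T>,
                            absl::DefaultHashContainerEq<T>>;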
diff --git a/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
index 04e2c38567..974b6521a0 100644
--- a/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/inlined_vector.h
@@ -775,7 +775,20 @@ class InlinedVector {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos < end());
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+ // It appears that GCC thinks that since `pos` is a const pointer and may
+ // point to uninitialized memory at this point, a warning should be
+ // issued. But `pos` is actually only used to compute an array index to
+ // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
return storage_.Erase(pos, pos + 1);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
// Overload of `InlinedVector::erase(...)` that erases every element in the
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h b/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h
index 57eac678fa..c521f6122e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/common_policy_traits.h
@@ -45,9 +45,10 @@ struct common_policy_traits {
// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is UNINITIALIZED
+ // Returns std::true_type if destroy is trivial.
template <class Alloc>
- static void destroy(Alloc* alloc, slot_type* slot) {
- Policy::destroy(alloc, slot);
+ static auto destroy(Alloc* alloc, slot_type* slot) {
+ return Policy::destroy(alloc, slot);
}
// Transfers the `old_slot` to `new_slot`. Any memory allocated by the
@@ -63,7 +64,7 @@ struct common_policy_traits {
// UNINITIALIZED
template <class Alloc>
static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
- transfer_impl(alloc, new_slot, old_slot, Rank0{});
+ transfer_impl(alloc, new_slot, old_slot, Rank2{});
}
// PRECONDITION: `slot` is INITIALIZED
@@ -82,23 +83,31 @@ struct common_policy_traits {
static constexpr bool transfer_uses_memcpy() {
return std::is_same<decltype(transfer_impl<std::allocator<char>>(
- nullptr, nullptr, nullptr, Rank0{})),
+ nullptr, nullptr, nullptr, Rank2{})),
+ std::true_type>::value;
+ }
+
+ // Returns true if destroy is trivial and can be omitted.
+ template <class Alloc>
+ static constexpr bool destroy_is_trivial() {
+ return std::is_same<decltype(destroy<Alloc>(nullptr, nullptr)),
std::true_type>::value;
}
private:
- // To rank the overloads below for overload resolution. Rank0 is preferred.
- struct Rank2 {};
- struct Rank1 : Rank2 {};
- struct Rank0 : Rank1 {};
+ // Use go/ranked-overloads for dispatching: the most-derived rank (Rank2) is
+ // preferred. See the sketch after this hunk.
+ struct Rank0 {};
+ struct Rank1 : Rank0 {};
+ struct Rank2 : Rank1 {};
// Use auto -> decltype as an enabler.
// P::transfer returns std::true_type if transfer uses memcpy (e.g. in
// node_slot_policy).
template <class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
- slot_type* old_slot, Rank0)
- -> decltype(P::transfer(alloc, new_slot, old_slot)) {
+ slot_type* old_slot,
+ Rank2) -> decltype(P::transfer(alloc, new_slot,
+ old_slot)) {
return P::transfer(alloc, new_slot, old_slot);
}
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
@@ -121,7 +130,7 @@ struct common_policy_traits {
template <class Alloc>
static void transfer_impl(Alloc* alloc, slot_type* new_slot,
- slot_type* old_slot, Rank2) {
+ slot_type* old_slot, Rank0) {
construct(alloc, new_slot, std::move(element(old_slot)));
destroy(alloc, old_slot);
}
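The ranked-overload idiom referenced above, reduced to a standalone sketch (`SizeImpl`/`Size` are illustrative names): the most-derived tag wins overload resolution, and SFINAE removes candidates that do not apply.

    #include <cstddef>

    struct Rank0 {};
    struct Rank1 : Rank0 {};
    struct Rank2 : Rank1 {};

    // Preferred overload: participates only when T has a size() member.
    template <class T>
    auto SizeImpl(const T& t, Rank2) -> decltype(t.size()) {
      return t.size();
    }

    // Fallback: always viable, but chosen only when the Rank2 overload is
    // removed by SFINAE, because Rank2 converts to Rank0.
    template <class T>
    size_t SizeImpl(const T&, Rank0) {
      return 0;
    }

    template <class T>
    size_t Size(const T& t) {
      return SizeImpl(t, Rank2{});  // always start at the highest rank
    }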
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h b/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h
index 59e70eb21d..6db0468d99 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/compressed_tuple.h
@@ -87,11 +87,11 @@ struct Storage {
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(absl::in_place_t, V&& v)
- : value(absl::forward<V>(v)) {}
+ : value(std::forward<V>(v)) {}
constexpr const T& get() const& { return value; }
- T& get() & { return value; }
- constexpr const T&& get() const&& { return absl::move(*this).value; }
- T&& get() && { return std::move(*this).value; }
+ constexpr T& get() & { return value; }
+ constexpr const T&& get() const&& { return std::move(*this).value; }
+ constexpr T&& get() && { return std::move(*this).value; }
};
template <typename T, size_t I>
@@ -99,13 +99,12 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
template <typename V>
- explicit constexpr Storage(absl::in_place_t, V&& v)
- : T(absl::forward<V>(v)) {}
+ explicit constexpr Storage(absl::in_place_t, V&& v) : T(std::forward<V>(v)) {}
constexpr const T& get() const& { return *this; }
- T& get() & { return *this; }
- constexpr const T&& get() const&& { return absl::move(*this); }
- T&& get() && { return std::move(*this); }
+ constexpr T& get() & { return *this; }
+ constexpr const T&& get() const&& { return std::move(*this); }
+ constexpr T&& get() && { return std::move(*this); }
};
template <typename D, typename I, bool ShouldAnyUseBase>
@@ -123,7 +122,7 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
- : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+ : Storage<Ts, I>(absl::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
@@ -135,7 +134,7 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
- : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+ : Storage<Ts, I, false>(absl::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
@@ -234,11 +233,11 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place,
- absl::forward<First>(first),
- absl::forward<Vs>(base)...) {}
+ std::forward<First>(first),
+ std::forward<Vs>(base)...) {}
template <int I>
- ElemT<I>& get() & {
+ constexpr ElemT<I>& get() & {
return StorageT<I>::get();
}
@@ -248,13 +247,13 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
}
template <int I>
- ElemT<I>&& get() && {
+ constexpr ElemT<I>&& get() && {
return std::move(*this).StorageT<I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
- return absl::move(*this).StorageT<I>::get();
+ return std::move(*this).StorageT<I>::get();
}
};
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h b/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
index 3262d4eb8d..ba8e08a2d2 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/container_memory.h
@@ -68,6 +68,18 @@ void* Allocate(Alloc* alloc, size_t n) {
return p;
}
+// Returns true if the destruction of the value with the given Allocator will
+// be trivial.
+template <class Allocator, class ValueType>
+constexpr auto IsDestructionTrivial() {
+ constexpr bool result =
+ std::is_trivially_destructible<ValueType>::value &&
+ std::is_same<typename absl::allocator_traits<
+ Allocator>::template rebind_alloc<char>,
+ std::allocator<char>>::value;
+ return std::integral_constant<bool, result>();
+}
+
// The pointer must have been previously obtained by calling
// Allocate<Alignment>(alloc, n).
template <size_t Alignment, class Alloc>
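A sketch of what the predicate above reports for two common instantiations, assuming they compile as written:

    #include <memory>
    #include <string>

    // int with std::allocator: trivially destructible, default allocator.
    static_assert(
        decltype(absl::container_internal::IsDestructionTrivial<
                 std::allocator<int>, int>())::value,
        "expected trivial destruction");

    // std::string has a nontrivial destructor, so the answer is false.
    static_assert(
        !decltype(absl::container_internal::IsDestructionTrivial<
                  std::allocator<std::string>, std::string>())::value,
        "expected nontrivial destruction");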
@@ -414,12 +426,13 @@ struct map_slot_policy {
}
template <class Allocator>
- static void destroy(Allocator* alloc, slot_type* slot) {
+ static auto destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
} else {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
}
+ return IsDestructionTrivial<Allocator, value_type>();
}
template <class Allocator>
@@ -451,6 +464,26 @@ struct map_slot_policy {
}
};
+// Type-erased function for computing the hash of the slot.
+using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
+
+// Type-erased function to apply `Fn` to the data inside `slot`.
+// The data is expected to have type `T`.
+template <class Fn, class T>
+size_t TypeErasedApplyToSlotFn(const void* fn, void* slot) {
+ const auto* f = static_cast<const Fn*>(fn);
+ return (*f)(*static_cast<const T*>(slot));
+}
+
+// Type-erased function to apply `Fn` to the data inside `*slot_ptr`.
+// The data is expected to have type `T`.
+template <class Fn, class T>
+size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr) {
+ const auto* f = static_cast<const Fn*>(fn);
+ const T* slot = *static_cast<const T**>(slot_ptr);
+ return (*f)(*slot);
+}
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
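A sketch of how the type-erased pieces above fit together; the wrapper function is illustrative:

    #include <cstddef>

    #include "absl/container/internal/container_memory.h"
    #include "absl/hash/hash.h"

    size_t HashSlotThroughErasure() {
      absl::Hash<int> hasher;
      int slot = 42;
      // The hasher travels as const void* and the slot as void*; the
      // template instantiation restores both types before invoking.
      absl::container_internal::HashSlotFn fn =
          &absl::container_internal::TypeErasedApplyToSlotFn<absl::Hash<int>,
                                                             int>;
      return fn(&hasher, &slot);
    }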
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
index a3613b4bc5..0f07bcfe29 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -45,14 +45,16 @@
#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
-#include <stdint.h>
#include <cstddef>
+#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include "absl/base/config.h"
+#include "absl/container/internal/common.h"
#include "absl/hash/hash.h"
+#include "absl/meta/type_traits.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
@@ -188,6 +190,71 @@ struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
template <class T>
struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
+template <typename T, typename E = void>
+struct HasAbslContainerHash : std::false_type {};
+
+template <typename T>
+struct HasAbslContainerHash<T, absl::void_t<typename T::absl_container_hash>>
+ : std::true_type {};
+
+template <typename T, typename E = void>
+struct HasAbslContainerEq : std::false_type {};
+
+template <typename T>
+struct HasAbslContainerEq<T, absl::void_t<typename T::absl_container_eq>>
+ : std::true_type {};
+
+template <typename T, typename E = void>
+struct AbslContainerEq {
+ using type = std::equal_to<>;
+};
+
+template <typename T>
+struct AbslContainerEq<
+ T, typename std::enable_if_t<HasAbslContainerEq<T>::value>> {
+ using type = typename T::absl_container_eq;
+};
+
+template <typename T, typename E = void>
+struct AbslContainerHash {
+ using type = void;
+};
+
+template <typename T>
+struct AbslContainerHash<
+ T, typename std::enable_if_t<HasAbslContainerHash<T>::value>> {
+ using type = typename T::absl_container_hash;
+};
+
+// HashEq specialization for user types that provide `absl_container_hash` and
+// (optionally) `absl_container_eq`. This specialization allows user types to
+// support heterogeneous lookup without having to explicitly specify Hash/Eq
+// type arguments in unordered Abseil containers.
+//
+// Both `absl_container_hash` and `absl_container_eq` should be transparent
+// (have an inner is_transparent type). While there is no technical reason to
+// restrict this to transparent-only types, there is also no feasible use case
+// in which they shouldn't be transparent; it is easier to relax the
+// requirement later, should such a case arise, than to tighten it.
+//
+// If a type provides only `absl_container_hash`, then the `Eq` part will be
+// `std::equal_to<void>`.
+//
+// User types are not allowed to provide only an `Eq` part, as there is no
+// feasible use case for this behavior: if `Hash` is the default one, then `Eq`
+// should be equivalent to `std::equal_to<T>`.
+template <typename T>
+struct HashEq<T, typename std::enable_if_t<HasAbslContainerHash<T>::value>> {
+ using Hash = typename AbslContainerHash<T>::type;
+ using Eq = typename AbslContainerEq<T>::type;
+ static_assert(IsTransparent<Hash>::value,
+ "absl_container_hash must be transparent. To achieve it add a "
+ "`using is_transparent = void;` clause to this type.");
+ static_assert(IsTransparent<Eq>::value,
+ "absl_container_eq must be transparent. To achieve it add a "
+ "`using is_transparent = void;` clause to this type.");
+};
+
// This header's visibility is restricted. If you need to access the default
// hasher please use the container's ::hasher alias instead.
//
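The detection idiom behind `HasAbslContainerHash`, in isolation (`HasFoo` is an illustrative name): the partial specialization is viable only when the nested type exists, because `absl::void_t` maps any well-formed type list to `void`.

    #include <type_traits>

    #include "absl/meta/type_traits.h"

    template <typename T, typename = void>
    struct HasFoo : std::false_type {};

    template <typename T>
    struct HasFoo<T, absl::void_t<typename T::foo>> : std::true_type {};

    struct WithFoo { using foo = int; };
    struct WithoutFoo {};

    static_assert(HasFoo<WithFoo>::value, "nested type detected");
    static_assert(!HasFoo<WithoutFoo>::value, "falls back to the base template");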
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
index 164ec12316..ad835d6fcd 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hash_policy_traits.h
@@ -148,6 +148,56 @@ struct hash_policy_traits : common_policy_traits<Policy> {
static auto value(T* elem) -> decltype(P::value(elem)) {
return P::value(elem);
}
+
+ using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
+
+ template <class Hash>
+ static constexpr HashSlotFn get_hash_slot_fn() {
+// get_hash_slot_fn may return nullptr to signal that a non-type-erased
+// function should be used. GCC warns against comparing a function's address
+// with nullptr.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+// Silence the error: the address of * will never be NULL [-Werror=address]
+#pragma GCC diagnostic ignored "-Waddress"
+#endif
+ return Policy::template get_hash_slot_fn<Hash>() == nullptr
+ ? &hash_slot_fn_non_type_erased<Hash>
+ : Policy::template get_hash_slot_fn<Hash>();
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+ }
+
+ // Whether small object optimization is enabled. True by default.
+ static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); }
+
+ private:
+ template <class Hash>
+ struct HashElement {
+ template <class K, class... Args>
+ size_t operator()(const K& key, Args&&...) const {
+ return h(key);
+ }
+ const Hash& h;
+ };
+
+ template <class Hash>
+ static size_t hash_slot_fn_non_type_erased(const void* hash_fn, void* slot) {
+ return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)},
+ Policy::element(static_cast<slot_type*>(slot)));
+ }
+
+ // Use go/ranked-overloads for dispatching. Rank1 is preferred.
+ struct Rank0 {};
+ struct Rank1 : Rank0 {};
+
+ // Use auto -> decltype as an enabler.
+ template <class P = Policy>
+ static constexpr auto soo_enabled_impl(Rank1) -> decltype(P::soo_enabled()) {
+ return P::soo_enabled();
+ }
+
+ static constexpr bool soo_enabled_impl(Rank0) { return true; }
};
} // namespace container_internal
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index 79a0973a6d..fd21d966b7 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -18,12 +18,18 @@
#include <atomic>
#include <cassert>
#include <cmath>
+#include <cstddef>
+#include <cstdint>
#include <functional>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/base/no_destructor.h"
+#include "absl/base/optimization.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
@@ -64,7 +70,7 @@ ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
HashtablezSampler& GlobalHashtablezSampler() {
- static auto* sampler = new HashtablezSampler();
+ static absl::NoDestructor<HashtablezSampler> sampler;
return *sampler;
}
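The pattern being adopted here, as a standalone sketch: `absl::NoDestructor` yields a function-local static whose destructor never runs, which sidesteps shutdown-order issues like the old new-and-leak idiom but without a heap allocation (`Greeting` is illustrative).

    #include <string>

    #include "absl/base/no_destructor.h"

    const std::string& Greeting() {
      // Constructed on first use, never destroyed.
      static absl::NoDestructor<std::string> greeting("hello");
      return *greeting;
    }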
@@ -72,7 +78,10 @@ HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
void HashtablezInfo::PrepareForSampling(int64_t stride,
- size_t inline_element_size_value) {
+ size_t inline_element_size_value,
+ size_t key_size_value,
+ size_t value_size_value,
+ uint16_t soo_capacity_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -92,6 +101,9 @@ void HashtablezInfo::PrepareForSampling(int64_t stride,
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
inline_element_size = inline_element_size_value;
+ key_size = key_size_value;
+ value_size = value_size_value;
+ soo_capacity = soo_capacity_value;
}
static bool ShouldForceSampling() {
@@ -115,12 +127,13 @@ static bool ShouldForceSampling() {
}
HashtablezInfo* SampleSlow(SamplingState& next_sample,
- size_t inline_element_size) {
+ size_t inline_element_size, size_t key_size,
+ size_t value_size, uint16_t soo_capacity) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
next_sample.next_sample = 1;
const int64_t old_stride = exchange(next_sample.sample_stride, 1);
- HashtablezInfo* result =
- GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+ HashtablezInfo* result = GlobalHashtablezSampler().Register(
+ old_stride, inline_element_size, key_size, value_size, soo_capacity);
return result;
}
@@ -150,10 +163,12 @@ HashtablezInfo* SampleSlow(SamplingState& next_sample,
// that case.
if (first) {
if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
- return SampleSlow(next_sample, inline_element_size);
+ return SampleSlow(next_sample, inline_element_size, key_size, value_size,
+ soo_capacity);
}
- return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+ return GlobalHashtablezSampler().Register(old_stride, inline_element_size,
+ key_size, value_size, soo_capacity);
#endif
}
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index e41ee2d747..d74acf8c6e 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -40,15 +40,20 @@
#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
#include <atomic>
+#include <cstddef>
+#include <cstdint>
#include <functional>
#include <memory>
#include <vector>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
+#include "absl/base/thread_annotations.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
#include "absl/utility/utility.h"
namespace absl {
@@ -67,7 +72,9 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
- void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+ void PrepareForSampling(int64_t stride, size_t inline_element_size_value,
+ size_t key_size, size_t value_size,
+ uint16_t soo_capacity_value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
@@ -91,8 +98,15 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
+ // The SOO capacity for this table in elements (not bytes). Note that sampled
+ // tables are never SOO because we need to store the infoz handle on the heap.
+ // Tables that would be SOO if not sampled should have: soo_capacity > 0 &&
+ // size <= soo_capacity && max_reserve <= soo_capacity.
+ uint16_t soo_capacity;
void* stack[kMaxStackDepth];
- size_t inline_element_size; // How big is the slot?
+ size_t inline_element_size; // How big is the slot in bytes?
+ size_t key_size; // sizeof(key_type)
+ size_t value_size; // sizeof(value_type)
};
void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length);
@@ -117,7 +131,8 @@ struct SamplingState {
};
HashtablezInfo* SampleSlow(SamplingState& next_sample,
- size_t inline_element_size);
+ size_t inline_element_size, size_t key_size,
+ size_t value_size, uint16_t soo_capacity);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -204,16 +219,19 @@ class HashtablezInfoHandle {
extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-// Returns an RAII sampling handle that manages registration and unregistation
-// with the global sampler.
+// Returns a sampling handle.
inline HashtablezInfoHandle Sample(
- size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
+ ABSL_ATTRIBUTE_UNUSED size_t inline_element_size,
+ ABSL_ATTRIBUTE_UNUSED size_t key_size,
+ ABSL_ATTRIBUTE_UNUSED size_t value_size,
+ ABSL_ATTRIBUTE_UNUSED uint16_t soo_capacity) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
- return HashtablezInfoHandle(
- SampleSlow(global_next_sample, inline_element_size));
+ return HashtablezInfoHandle(SampleSlow(global_next_sample,
+ inline_element_size, key_size,
+ value_size, soo_capacity));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
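The shape of the sampling fast path above, as a simplified standalone sketch with a fixed stride in place of the randomized stride the real sampler uses:

    #include <cstdint>

    thread_local int64_t next_sample = 16;  // countdown to the next sample

    bool ShouldSample() {
      // Common case: a single decrement and a well-predicted branch.
      if (--next_sample > 0) return false;
      // Rare case: reset the countdown and report that a sample is due.
      next_sample = 16;
      return true;
    }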
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
index 0eb9c34ddd..2f24e4616f 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -27,6 +27,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/internal/identity.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -82,16 +83,6 @@ using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
-template <typename T>
-struct TypeIdentity {
- using type = T;
-};
-
-// Used for function arguments in template functions to prevent ADL by forcing
-// callers to explicitly specify the template parameter.
-template <typename T>
-using NoTypeDeduction = typename TypeIdentity<T>::type;
-
template <typename A, bool IsTriviallyDestructible =
absl::is_trivially_destructible<ValueType<A>>::value>
struct DestroyAdapter;
@@ -139,7 +130,7 @@ struct MallocAdapter {
};
template <typename A, typename ValueAdapter>
-void ConstructElements(NoTypeDeduction<A>& allocator,
+void ConstructElements(absl::internal::type_identity_t<A>& allocator,
Pointer<A> construct_first, ValueAdapter& values,
SizeType<A> construct_size) {
for (SizeType<A> i = 0; i < construct_size; ++i) {
@@ -322,14 +313,13 @@ class Storage {
// The policy to be used specifically when swapping inlined elements.
using SwapInlinedElementsPolicy = absl::conditional_t<
- // Fast path: if the value type can be trivially move constructed/assigned
- // and destroyed, and we know the allocator doesn't do anything fancy,
- // then it's safe for us to simply swap the bytes in the inline storage.
- // It's as if we had move-constructed a temporary vector, move-assigned
- // one to the other, then move-assigned the first from the temporary.
- absl::conjunction<absl::is_trivially_move_constructible<ValueType<A>>,
- absl::is_trivially_move_assignable<ValueType<A>>,
- absl::is_trivially_destructible<ValueType<A>>,
+ // Fast path: if the value type can be trivially relocated, and we
+ // know the allocator doesn't do anything fancy, then it's safe for us
+ // to simply swap the bytes in the inline storage. It's as if we had
+ // relocated the first vector's elements into temporary storage,
+ // relocated the second's elements into the (now-empty) first's,
+ // and then relocated from temporary storage into the second.
+ absl::conjunction<absl::is_trivially_relocatable<ValueType<A>>,
std::is_same<A, std::allocator<ValueType<A>>>>::value,
MemcpyPolicy,
absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
@@ -624,8 +614,8 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
- -> void {
+auto Storage<T, N, A>::Initialize(ValueAdapter values,
+ SizeType<A> new_size) -> void {
// Only callable from constructors!
ABSL_HARDENING_ASSERT(!GetIsAllocated());
ABSL_HARDENING_ASSERT(GetSize() == 0);
@@ -656,8 +646,8 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
- -> void {
+auto Storage<T, N, A>::Assign(ValueAdapter values,
+ SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
AllocationTransaction<A> allocation_tx(GetAllocator());
@@ -699,8 +689,8 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
- -> void {
+auto Storage<T, N, A>::Resize(ValueAdapter values,
+ SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
Pointer<A> const base = storage_view.data;
const SizeType<A> size = storage_view.size;
@@ -885,8 +875,8 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
}
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
- -> Iterator<A> {
+auto Storage<T, N, A>::Erase(ConstIterator<A> from,
+ ConstIterator<A> to) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
@@ -894,16 +884,30 @@ auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
std::distance(ConstIterator<A>(storage_view.data), from));
SizeType<A> erase_end_index = erase_index + erase_size;
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(storage_view.data + erase_end_index));
-
- AssignElements<A>(storage_view.data + erase_index, move_values,
- storage_view.size - erase_end_index);
+ // Fast path: if the value type is trivially relocatable and we know
+ // the allocator doesn't do anything fancy, then we know it is legal for us to
+ // simply destroy the elements in the "erasure window" (which cannot throw)
+ // and then memmove the tail downward to close the window.
+ if (absl::is_trivially_relocatable<ValueType<A>>::value &&
+ std::is_nothrow_destructible<ValueType<A>>::value &&
+ std::is_same<A, std::allocator<ValueType<A>>>::value) {
+ DestroyAdapter<A>::DestroyElements(
+ GetAllocator(), storage_view.data + erase_index, erase_size);
+ std::memmove(
+ reinterpret_cast<char*>(storage_view.data + erase_index),
+ reinterpret_cast<const char*>(storage_view.data + erase_end_index),
+ (storage_view.size - erase_end_index) * sizeof(ValueType<A>));
+ } else {
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data + erase_end_index));
- DestroyAdapter<A>::DestroyElements(
- GetAllocator(), storage_view.data + (storage_view.size - erase_size),
- erase_size);
+ AssignElements<A>(storage_view.data + erase_index, move_values,
+ storage_view.size - erase_end_index);
+ DestroyAdapter<A>::DestroyElements(
+ GetAllocator(), storage_view.data + (storage_view.size - erase_size),
+ erase_size);
+ }
SubtractSize(erase_size);
return Iterator<A>(storage_view.data + erase_index);
}
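The erase fast-path condition, restated as a standalone predicate over the same traits (`CanMemmoveOnErase` is an illustrative name):

    #include <memory>
    #include <type_traits>

    #include "absl/meta/type_traits.h"

    // All three conditions must hold before raw byte moves may replace
    // per-element move-assignment followed by destruction.
    template <typename T, typename Alloc>
    constexpr bool CanMemmoveOnErase() {
      return absl::is_trivially_relocatable<T>::value &&
             std::is_nothrow_destructible<T>::value &&
             std::is_same<Alloc, std::allocator<T>>::value;
    }

    static_assert(CanMemmoveOnErase<int, std::allocator<int>>(),
                  "ints under the default allocator take the fast path");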
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
index 97182bc7bd..464bf23bd3 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -198,22 +198,24 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = this->find_or_prepare_insert(k);
- if (res.second)
+ if (res.second) {
this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
- else
- Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
- return {this->iterator_at(res.first), res.second};
+ } else {
+ Policy::value(&*res.first) = std::forward<V>(v);
+ }
+ return res;
}
template <class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = this->find_or_prepare_insert(k);
- if (res.second)
+ if (res.second) {
this->emplace_at(res.first, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
- return {this->iterator_at(res.first), res.second};
+ }
+ return res;
}
};
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 9f8ea51987..1cae03819d 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -23,19 +23,24 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/hash/hash.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
-// We have space for `growth_left` before a single block of control bytes. A
+// Represents a control byte corresponding to a full slot with arbitrary hash.
+constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+
+// We have space for `growth_info` before a single block of control bytes. A
// single block of empty control bytes is used for tables without any slots allocated.
// This enables removing a branch in the hot path of find(). In order to ensure
// that the control bytes are aligned to 16, we have 16 bytes before the control
-// bytes even though growth_left only needs 8.
-constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+// bytes even though growth_info only needs 8.
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
@@ -46,6 +51,18 @@ alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+// We need one full byte followed by a sentinel byte for iterator::operator++ to
+// work. We have a full group after kSentinel to be safe (in case operator++ is
+// changed to read a full group).
+ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = {
+ ZeroCtrlT(), ctrl_t::kSentinel, ZeroCtrlT(), ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty};
+static_assert(NumControlBytes(SooCapacity()) <= 17,
+ "kSooControl capacity too small");
+
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif
@@ -104,10 +121,25 @@ bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
return ShouldRehashForBugDetection(ctrl, capacity);
}
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
+bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
+ const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
- return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+ return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
+ CommonFields& common) {
+ assert(common.capacity() == NextCapacity(SooCapacity()));
+ // After resize from capacity 1 to 3, we always have exactly the slot with
+ // index 1 occupied, so we need to insert either at index 0 or index 2.
+ assert(HashSetResizeHelper::SooSlotIndex() == 1);
+ PrepareInsertCommon(common);
+ const size_t offset = H1(hash, common.control()) & 2;
+ common.growth_info().OverwriteEmptyAsFull();
+ SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
+ common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
+ return offset;
}
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
@@ -128,6 +160,8 @@ FindInfo find_first_non_full_outofline(const CommonFields& common,
return find_first_non_full(common, hash);
}
+namespace {
+
// Returns the address of the slot just after slot assuming each slot has the
// specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
@@ -140,8 +174,22 @@ static inline void* PrevSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}
+// Finds an empty slot that is guaranteed to exist, starting from the given
+// position.
+// NOTE: this function is almost never triggered inside
+// DropDeletesWithoutResize, so we keep it simple.
+// The table is rather sparse, so an empty slot will be found quickly.
+size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
+ for (size_t i = start; i < end; ++i) {
+ if (IsEmpty(ctrl[i])) {
+ return i;
+ }
+ }
+ assert(false && "no empty slot");
+ return ~size_t{};
+}
+
void DropDeletesWithoutResize(CommonFields& common,
- const PolicyFunctions& policy, void* tmp_space) {
+ const PolicyFunctions& policy) {
void* set = &common;
void* slot_array = common.slot_array();
const size_t capacity = common.capacity();
@@ -165,17 +213,28 @@ void DropDeletesWithoutResize(CommonFields& common,
// repeat procedure for current slot with moved from element (target)
ctrl_t* ctrl = common.control();
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
+ const void* hash_fn = policy.hash_fn(common);
auto hasher = policy.hash_slot;
auto transfer = policy.transfer;
const size_t slot_size = policy.slot_size;
size_t total_probe_length = 0;
void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
+
+ // The index of an empty slot that can be used as temporary memory for
+ // the swap operation.
+ constexpr size_t kUnknownId = ~size_t{};
+ size_t tmp_space_id = kUnknownId;
+
for (size_t i = 0; i != capacity;
++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
+ if (IsEmpty(ctrl[i])) {
+ tmp_space_id = i;
+ continue;
+ }
if (!IsDeleted(ctrl[i])) continue;
- const size_t hash = (*hasher)(set, slot_ptr);
+ const size_t hash = (*hasher)(hash_fn, slot_ptr);
const FindInfo target = find_first_non_full(common, hash);
const size_t new_i = target.offset;
total_probe_length += target.probe_length;
@@ -202,16 +261,26 @@ void DropDeletesWithoutResize(CommonFields& common,
SetCtrl(common, new_i, H2(hash), slot_size);
(*transfer)(set, new_slot_ptr, slot_ptr);
SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
+ // Initialize or update the empty slot id.
+ tmp_space_id = i;
} else {
assert(IsDeleted(ctrl[new_i]));
SetCtrl(common, new_i, H2(hash), slot_size);
// Until we are done rehashing, DELETED marks previously FULL slots.
+ if (tmp_space_id == kUnknownId) {
+ tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
+ }
+ void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
+ SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
+
// Swap i and new_i elements.
(*transfer)(set, tmp_space, new_slot_ptr);
(*transfer)(set, new_slot_ptr, slot_ptr);
(*transfer)(set, slot_ptr, tmp_space);
+ SanitizerPoisonMemoryRegion(tmp_space, slot_size);
+
// repeat the processing of the ith slot
--i;
slot_ptr = PrevSlot(slot_ptr, slot_size);
@@ -238,6 +307,8 @@ static bool WasNeverFull(CommonFields& c, size_t index) {
Group::kWidth;
}
+} // namespace
+
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
c.decrement_size();
@@ -245,17 +316,19 @@ void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
if (WasNeverFull(c, index)) {
SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
- c.set_growth_left(c.growth_left() + 1);
+ c.growth_info().OverwriteFullAsEmpty();
return;
}
+ c.growth_info().OverwriteFullAsDeleted();
SetCtrl(c, index, ctrl_t::kDeleted, slot_size);
}
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
- bool reuse) {
+ bool reuse, bool soo_enabled) {
c.set_size(0);
if (reuse) {
+ assert(!soo_enabled || c.capacity() > SooCapacity());
ResetCtrl(c, policy.slot_size);
ResetGrowthLeft(c);
c.infoz().RecordStorageChanged(0, c.capacity());
@@ -263,118 +336,308 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
// We need to record infoz before calling dealloc, which will unregister
// infoz.
c.infoz().RecordClearedReservation();
- c.infoz().RecordStorageChanged(0, 0);
+ c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
(*policy.dealloc)(c, policy);
- c.set_control(EmptyGroup());
- c.set_generation_ptr(EmptyGeneration());
- c.set_slots(nullptr);
- c.set_capacity(0);
+ c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{};
}
}
void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
- ctrl_t* new_ctrl, size_t new_capacity) const {
+ ctrl_t* __restrict new_ctrl, size_t new_capacity) const {
assert(is_single_group(new_capacity));
constexpr size_t kHalfWidth = Group::kWidth / 2;
+ constexpr size_t kQuarterWidth = Group::kWidth / 4;
assert(old_capacity_ < kHalfWidth);
+ static_assert(sizeof(uint64_t) >= kHalfWidth,
+ "Group size is too large. The ctrl bytes for half a group must "
+ "fit into a uint64_t for this implementation.");
+ static_assert(sizeof(uint64_t) <= Group::kWidth,
+ "Group size is too small. The ctrl bytes for a group must "
+ "cover a uint64_t for this implementation.");
const size_t half_old_capacity = old_capacity_ / 2;
// NOTE: operations are done with a compile-time-known size = kHalfWidth.
// The compiler optimizes that into a single ASM operation.
- // Copy second half of bytes to the beginning.
- // We potentially copy more bytes in order to have compile time known size.
- // Mirrored bytes from the old_ctrl_ will also be copied.
- // In case of old_capacity_ == 3, we will copy 1st element twice.
+ // Load the bytes from half_old_capacity + 1. This contains the last half of
+ // old_ctrl bytes, followed by the sentinel byte, and then the first half of
+ // the cloned bytes. This effectively shuffles the control bytes.
+ uint64_t copied_bytes = 0;
+ copied_bytes =
+ absl::little_endian::Load64(old_ctrl() + half_old_capacity + 1);
+
+ // We change the sentinel byte to kEmpty before storing to both the start of
+ // the new_ctrl, and past the end of the new_ctrl later for the new cloned
+ // bytes. Note that this is faster than setting the sentinel byte to kEmpty
+ // after the copy directly in new_ctrl because we are limited on store
+ // bandwidth.
+ constexpr uint64_t kEmptyXorSentinel =
+ static_cast<uint8_t>(ctrl_t::kEmpty) ^
+ static_cast<uint8_t>(ctrl_t::kSentinel);
+ const uint64_t mask_convert_old_sentinel_to_empty =
+ kEmptyXorSentinel << (half_old_capacity * 8);
+ copied_bytes ^= mask_convert_old_sentinel_to_empty;
+
+ // Copy second half of bytes to the beginning. This correctly sets the bytes
+ // [0, old_capacity]. We potentially copy more bytes in order to have compile
+ // time known size. Mirrored bytes from the old_ctrl() will also be copied. In
+ // case of old_capacity_ == 3, we will copy 1st element twice.
// Examples:
+ // (old capacity = 1)
// old_ctrl = 0S0EEEEEEE...
- // new_ctrl = S0EEEEEEEE...
+ // new_ctrl = E0EEEEEE??...
//
- // old_ctrl = 01S01EEEEE...
- // new_ctrl = 1S01EEEEEE...
+ // (old capacity = 3)
+ // old_ctrl = 012S012EEEEE...
+ // new_ctrl = 12E012EE????...
//
+ // (old capacity = 7)
// old_ctrl = 0123456S0123456EE...
- // new_ctrl = 456S0123?????????...
- std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth);
- // Clean up copied kSentinel from old_ctrl.
- new_ctrl[half_old_capacity] = ctrl_t::kEmpty;
-
- // Clean up damaged or uninitialized bytes.
-
- // Clean bytes after the intended size of the copy.
- // Example:
- // new_ctrl = 1E01EEEEEEE????
- // *new_ctrl= 1E0EEEEEEEE????
- // position /
- std::memset(new_ctrl + old_capacity_ + 1, static_cast<int8_t>(ctrl_t::kEmpty),
- kHalfWidth);
- // Clean non-mirrored bytes that are not initialized.
- // For small old_capacity that may be inside of mirrored bytes zone.
+ // new_ctrl = 456E0123?????????...
+ absl::little_endian::Store64(new_ctrl, copied_bytes);
+
+ // Set the space [old_capacity + 1, new_capacity] to empty as these bytes will
+ // not be written again. This is safe because
+ // NumControlBytes = new_capacity + kWidth and
+ // new_capacity >= old_capacity + 1.
// Examples:
- // new_ctrl = 1E0EEEEEEEE??????????....
- // *new_ctrl= 1E0EEEEEEEEEEEEE?????....
- // position /
+ // (old_capacity = 3, new_capacity = 15)
+ // new_ctrl = 12E012EE?????????????...??
+ // *new_ctrl = 12E0EEEEEEEEEEEEEEEE?...??
+ // position / S
//
- // new_ctrl = 456E0123???????????...
- // *new_ctrl= 456E0123EEEEEEEE???...
- // position /
- std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
- kHalfWidth);
- // Clean last mirrored bytes that are not initialized
- // and will not be overwritten by mirroring.
+ // (old_capacity = 7, new_capacity = 15)
+ // new_ctrl = 456E0123?????????????????...??
+ // *new_ctrl = 456E0123EEEEEEEEEEEEEEEE?...??
+ // position / S
+ std::memset(new_ctrl + old_capacity_ + 1, static_cast<int8_t>(ctrl_t::kEmpty),
+ Group::kWidth);
+
+ // Set the last kHalfWidth bytes to empty, to ensure the bytes all the way to
+ // the end are initialized.
// Examples:
- // new_ctrl = 1E0EEEEEEEEEEEEE????????
- // *new_ctrl= 1E0EEEEEEEEEEEEEEEEEEEEE
- // position S /
+ // new_ctrl = 12E0EEEEEEEEEEEEEEEE?...???????
+ // *new_ctrl = 12E0EEEEEEEEEEEEEEEE???EEEEEEEE
+ // position S /
//
- // new_ctrl = 456E0123EEEEEEEE???????????????
- // *new_ctrl= 456E0123EEEEEEEE???????EEEEEEEE
- // position S /
- std::memset(new_ctrl + new_capacity + kHalfWidth,
+ // new_ctrl = 456E0123EEEEEEEEEEEEEEEE???????
+ // *new_ctrl = 456E0123EEEEEEEEEEEEEEEEEEEEEEE
+ // position S /
+ std::memset(new_ctrl + NumControlBytes(new_capacity) - kHalfWidth,
static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
- // Create mirrored bytes. old_capacity_ < kHalfWidth
- // Example:
- // new_ctrl = 456E0123EEEEEEEE???????EEEEEEEE
- // *new_ctrl= 456E0123EEEEEEEE456E0123EEEEEEE
- // position S/
- ctrl_t g[kHalfWidth];
- std::memcpy(g, new_ctrl, kHalfWidth);
- std::memcpy(new_ctrl + new_capacity + 1, g, kHalfWidth);
+ // Copy the first bytes to the end (starting at new_capacity + 1) to set the
+ // cloned bytes. Note that we use the already copied bytes from old_ctrl here
+ // rather than copying from new_ctrl to avoid a Read-after-Write hazard, since
+ // new_ctrl was just written to. The first old_capacity-1 bytes are set
+ // correctly. Then there may be up to old_capacity bytes that need to be
+ // overwritten, and any remaining bytes will be correctly set to empty. This
+ // sets [new_capacity + 1, new_capacity + 1 + old_capacity] correctly.
+ // Examples:
+ // new_ctrl = 12E0EEEEEEEEEEEEEEEE?...???????
+ // *new_ctrl = 12E0EEEEEEEEEEEE12E012EEEEEEEEE
+ // position S/
+ //
+ // new_ctrl = 456E0123EEEEEEEE?...???EEEEEEEE
+ // *new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE
+ // position S/
+ absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
+
+ // Set the remaining bytes at the end, past the cloned bytes, to empty. The
+ // incorrectly set bytes are [new_capacity + old_capacity + 2,
+ // min(new_capacity + 1 + kHalfWidth, new_capacity + old_capacity + 2 +
+ // half_old_capacity)]. Taking the difference, we need to set
+ // min(kHalfWidth - (old_capacity + 1), half_old_capacity) bytes. Since
+ // old_capacity < kHalfWidth, half_old_capacity < kQuarterWidth, so we set
+ // kQuarterWidth bytes beginning at new_capacity + old_capacity + 2 to kEmpty.
+ // Examples:
+ // new_ctrl = 12E0EEEEEEEEEEEE12E012EEEEEEEEE
+ // *new_ctrl = 12E0EEEEEEEEEEEE12E0EEEEEEEEEEE
+ // position S /
+ //
+ // new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE
+ // *new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE (no change)
+ // position S /
+ std::memset(new_ctrl + new_capacity + old_capacity_ + 2,
+ static_cast<int8_t>(ctrl_t::kEmpty), kQuarterWidth);
+
+ // Finally, we set the new sentinel byte.
+ new_ctrl[new_capacity] = ctrl_t::kSentinel;
+}
- // Finally set sentinel to its place.
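
The shuffle above is easier to follow with bytes you can print. A minimal standalone sketch (assuming Group::kWidth == 16; digits stand for full control bytes, 'E' for kEmpty, 'S' for kSentinel, '?' for uninitialized) replays the old_capacity = 3, new_capacity = 15 example from the comments:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    int main() {
      constexpr std::size_t kWidth = 16, kHalfWidth = 8, kQuarterWidth = 4;
      constexpr std::size_t old_capacity = 3, new_capacity = 15;
      char new_ctrl[new_capacity + kWidth + 1];
      std::memset(new_ctrl, '?', new_capacity + kWidth);
      new_ctrl[new_capacity + kWidth] = '\0';
      // "12E012EE" stands in for copied_bytes, the half-swapped old control
      // bytes loaded before the steps shown in the patch.
      std::memcpy(new_ctrl, "12E012EE", 8);
      std::memset(new_ctrl + old_capacity + 1, 'E', kWidth);
      std::memset(new_ctrl + new_capacity + kWidth - kHalfWidth, 'E', kHalfWidth);
      std::memcpy(new_ctrl + new_capacity + 1, "12E012EE", 8);
      std::memset(new_ctrl + new_capacity + old_capacity + 2, 'E', kQuarterWidth);
      new_ctrl[new_capacity] = 'S';
      std::printf("%s\n", new_ctrl);  // 12E0EEEEEEEEEEES12E0EEEEEEEEEEE
    }
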
+void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
+ size_t new_capacity) {
+ assert(is_single_group(new_capacity));
+ std::memset(new_ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ NumControlBytes(new_capacity));
+ assert(HashSetResizeHelper::SooSlotIndex() == 1);
+ // This allows us to avoid branching on had_soo_slot_.
+ assert(had_soo_slot_ || h2 == ctrl_t::kEmpty);
+ new_ctrl[1] = new_ctrl[new_capacity + 2] = h2;
new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
- void* old_slots, void* new_slots, size_t slot_size) const {
+ void* new_slots, size_t slot_size) const {
assert(old_capacity_ > 0);
const size_t half_old_capacity = old_capacity_ / 2;
- SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
+ SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
std::memcpy(new_slots,
- SlotAddress(old_slots, half_old_capacity + 1, slot_size),
+ SlotAddress(old_slots(), half_old_capacity + 1, slot_size),
slot_size * half_old_capacity);
std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size),
- old_slots, slot_size * (half_old_capacity + 1));
+ old_slots(), slot_size * (half_old_capacity + 1));
}
void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
- CommonFields& c, void* old_slots, size_t slot_size) {
+ CommonFields& c, size_t slot_size) {
assert(old_capacity_ < Group::kWidth / 2);
assert(is_single_group(c.capacity()));
assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
- GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(),
- slot_size);
+ GrowIntoSingleGroupShuffleTransferableSlots(c.slot_array(), slot_size);
// We poison since GrowIntoSingleGroupShuffleTransferableSlots
// may leave empty slots unpoisoned.
PoisonSingleGroupEmptySlots(c, slot_size);
}
+void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
+ size_t slot_size) {
+ assert(was_soo_);
+ assert(had_soo_slot_);
+ assert(is_single_group(c.capacity()));
+ std::memcpy(SlotAddress(c.slot_array(), SooSlotIndex(), slot_size),
+ old_soo_data(), slot_size);
+ PoisonSingleGroupEmptySlots(c, slot_size);
+}
+
+namespace {
+
+// Called whenever the table needs to free up slots, either by removing
+// tombstones via rehash or by growing the table.
+ABSL_ATTRIBUTE_NOINLINE
+FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash,
+ const PolicyFunctions& policy) {
+ const size_t cap = common.capacity();
+ if (cap > Group::kWidth &&
+ // Do these calculations in 64-bit to avoid overflow.
+ common.size() * uint64_t{32} <= cap * uint64_t{25}) {
+ // Squash DELETED without growing if there is enough capacity.
+ //
+ // Rehash in place if the current size is <= 25/32 of capacity.
+ // Rationale for such a high factor: 1) DropDeletesWithoutResize() is
+ // faster than resize, and 2) it takes quite a bit of work to add
+    // tombstones. In the worst case, it seems to take approximately 4
+    // insert/erase pairs to create a single tombstone, and so if we are
+ // rehashing because of tombstones, we can afford to rehash-in-place as
+ // long as we are reclaiming at least 1/8 the capacity without doing more
+ // than 2X the work. (Where "work" is defined to be size() for rehashing
+ // or rehashing in place, and 1 for an insert or erase.) But rehashing in
+ // place is faster per operation than inserting or even doubling the size
+ // of the table, so we actually afford to reclaim even less space from a
+ // resize-in-place. The decision is to rehash in place if we can reclaim
+ // at about 1/8th of the usable capacity (specifically 3/28 of the
+ // capacity) which means that the total cost of rehashing will be a small
+ // fraction of the total work.
+ //
+ // Here is output of an experiment using the BM_CacheInSteadyState
+ // benchmark running the old case (where we rehash-in-place only if we can
+ // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity).
+ //
+    // Note that although the worst-case number of rehashes jumped up from
+    // 15 to 190, the number of operations per second is almost the same.
+ //
+ // Abridged output of running BM_CacheInSteadyState benchmark from
+ // raw_hash_set_benchmark. N is the number of insert/erase operations.
+ //
+    //         | OLD (recover >= 7/16)    | NEW (recover >= 3/32)
+ // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
+ // 448 | 145284 0.44 18 | 140118 0.44 19
+ // 493 | 152546 0.24 11 | 151417 0.48 28
+ // 538 | 151439 0.26 11 | 151152 0.53 38
+ // 583 | 151765 0.28 11 | 150572 0.57 50
+ // 628 | 150241 0.31 11 | 150853 0.61 66
+ // 672 | 149602 0.33 12 | 150110 0.66 90
+ // 717 | 149998 0.35 12 | 149531 0.70 129
+ // 762 | 149836 0.37 13 | 148559 0.74 190
+ // 807 | 149736 0.39 14 | 151107 0.39 14
+ // 852 | 150204 0.42 15 | 151019 0.42 15
+ DropDeletesWithoutResize(common, policy);
+ } else {
+ // Otherwise grow the container.
+ policy.resize(common, NextCapacity(cap), HashtablezInfoHandle{});
+ }
+  // This function is typically called with tables containing deleted slots.
+  // The table will be big, and `FindFirstNonFullAfterResize` would always
+  // fall back to `find_first_non_full`, so we use `find_first_non_full`
+  // directly.
+ return find_first_non_full(common, hash);
+}
+
+} // namespace
+
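
To make the 25/32 threshold above concrete, here is a small sketch; ShouldRehashInPlace is a hypothetical stand-in for the inline condition, assuming Group::kWidth == 16:

    #include <cstdint>
    #include <cstdio>

    // Mirrors `cap > Group::kWidth && size * 32 <= cap * 25`.
    bool ShouldRehashInPlace(uint64_t size, uint64_t cap) {
      return cap > 16 && size * uint64_t{32} <= cap * uint64_t{25};
    }

    int main() {
      std::printf("%d\n", ShouldRehashInPlace(799, 1023));  // 1: drop tombstones
      std::printf("%d\n", ShouldRehashInPlace(800, 1023));  // 0: grow instead
    }
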
+const void* GetHashRefForEmptyHasher(const CommonFields& common) {
+  // The empty base optimization typically makes the address of an empty base
+  // class the same as the first address of the derived class object.
+  // But we generally assume that for an empty hasher we can return any valid
+  // pointer.
+ return &common;
+}
+
+size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
+ const PolicyFunctions& policy) {
+ // When there are no deleted slots in the table
+ // and growth_left is positive, we can insert at the first
+ // empty slot in the probe sequence (target).
+ const bool use_target_hint =
+ // Optimization is disabled when generations are enabled.
+ // We have to rehash even sparse tables randomly in such mode.
+ !SwisstableGenerationsEnabled() &&
+ common.growth_info().HasNoDeletedAndGrowthLeft();
+ if (ABSL_PREDICT_FALSE(!use_target_hint)) {
+ // Notes about optimized mode when generations are disabled:
+    // We do not enter this branch if the table has no deleted slots
+ // and growth_left is positive.
+ // We enter this branch in the following cases listed in decreasing
+ // frequency:
+ // 1. Table without deleted slots (>95% cases) that needs to be resized.
+ // 2. Table with deleted slots that has space for the inserting element.
+ // 3. Table with deleted slots that needs to be rehashed or resized.
+ if (ABSL_PREDICT_TRUE(common.growth_info().HasNoGrowthLeftAndNoDeleted())) {
+ const size_t old_capacity = common.capacity();
+ policy.resize(common, NextCapacity(old_capacity), HashtablezInfoHandle{});
+ target = HashSetResizeHelper::FindFirstNonFullAfterResize(
+ common, old_capacity, hash);
+ } else {
+ // Note: the table may have no deleted slots here when generations
+ // are enabled.
+ const bool rehash_for_bug_detection =
+ common.should_rehash_for_bug_detection_on_insert();
+ if (rehash_for_bug_detection) {
+ // Move to a different heap allocation in order to detect bugs.
+ const size_t cap = common.capacity();
+ policy.resize(common,
+ common.growth_left() > 0 ? cap : NextCapacity(cap),
+ HashtablezInfoHandle{});
+ }
+ if (ABSL_PREDICT_TRUE(common.growth_left() > 0)) {
+ target = find_first_non_full(common, hash);
+ } else {
+ target = FindInsertPositionWithGrowthOrRehash(common, hash, policy);
+ }
+ }
+ }
+ PrepareInsertCommon(common);
+ common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
+ SetCtrl(common, target.offset, H2(hash), policy.slot_size);
+ common.infoz().RecordInsert(hash, target.probe_length);
+ return target.offset;
+}
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
index 3518bc3404..d4fe8f5c21 100644
--- a/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -80,7 +80,7 @@
// slot_type slots[capacity];
// };
//
-// The length of this array is computed by `AllocSize()` below.
+// The length of this array is computed by `RawHashSetLayout::alloc_size` below.
//
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
// platform-specific size) that define the state of the corresponding slot in
@@ -100,6 +100,13 @@
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
+// # Small Object Optimization (SOO)
+//
+// When the size/alignment of the value_type and the capacity of the table are
+// small, we enable small object optimization and store the values inline in
+// the raw_hash_set object. This optimization allows us to avoid
+// allocation/deallocation as well as cache/dTLB misses.
+//
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
@@ -233,9 +240,10 @@ namespace container_internal {
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
-#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
- defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
- defined(ABSL_HAVE_MEMORY_SANITIZER)
+#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER)) && \
+ !defined(NDEBUG_SANITIZER) // If defined, performance is important.
// When compiled in sanitizer mode, we add generation integers to the backing
// array and iterators. In the backing array, we store the generation between
// the control bytes and the slots. When iterators are dereferenced, we assert
@@ -374,6 +382,9 @@ uint32_t TrailingZeros(T x) {
return static_cast<uint32_t>(countr_zero(x));
}
+// 8-byte bitmask with the most significant bit set in every byte.
+constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
+
// An abstract bitmask, such as that emitted by a SIMD instruction.
//
// Specifically, this type implements a simple bitset whose representation is
@@ -423,27 +434,35 @@ class NonIterableBitMask {
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
+// If NullifyBitsOnIteration is true (only allowed when Shift == 3),
+// a non-zero abstract bit is allowed to have additional bits set
+// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
//
// For example:
// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
-template <class T, int SignificantBits, int Shift = 0>
+template <class T, int SignificantBits, int Shift = 0,
+ bool NullifyBitsOnIteration = false>
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
using Base = NonIterableBitMask<T, SignificantBits, Shift>;
static_assert(std::is_unsigned<T>::value, "");
static_assert(Shift == 0 || Shift == 3, "");
+ static_assert(!NullifyBitsOnIteration || Shift == 3, "");
public:
- explicit BitMask(T mask) : Base(mask) {}
+ explicit BitMask(T mask) : Base(mask) {
+ if (Shift == 3 && !NullifyBitsOnIteration) {
+ assert(this->mask_ == (this->mask_ & kMsbs8Bytes));
+ }
+ }
// BitMask is an iterator over the indices of its abstract bits.
using value_type = int;
using iterator = BitMask;
using const_iterator = BitMask;
BitMask& operator++() {
- if (Shift == 3) {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- this->mask_ &= msbs;
+ if (Shift == 3 && NullifyBitsOnIteration) {
+ this->mask_ &= kMsbs8Bytes;
}
this->mask_ &= (this->mask_ - 1);
return *this;
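
A standalone sketch of the Shift == 3 iteration above (a re-creation for illustration, not the real BitMask; needs C++20 for std::countr_zero). Each abstract bit occupies one byte, so an abstract index is the trailing-zero count divided by 8, and the nullify step discards the extra low bits a NEON comparison leaves behind:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
      // A Shift == 3 mask where abstract bits 2 and 3 are set; byte 2 carries
      // extra low bits (0xFF) the way a NEON comparison result would.
      uint64_t mask = 0x0000000080FF0000ULL;
      mask &= kMsbs8Bytes;  // the nullify step: keep one bit per byte
      while (mask != 0) {
        std::printf("%d\n", std::countr_zero(mask) >> 3);  // prints 2, then 3
        mask &= mask - 1;  // clear the lowest abstract bit
      }
    }
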
@@ -520,10 +539,24 @@ ABSL_DLL extern const ctrl_t kEmptyGroup[32];
// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
// Const must be cast away here; no uses of this function will actually write
- // to it, because it is only used for empty tables.
+ // to it because it is only used for empty tables.
return const_cast<ctrl_t*>(kEmptyGroup + 16);
}
+// For use in SOO iterators.
+// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
+// bit in iterators. This would add branches but reduce cache misses.
+ABSL_DLL extern const ctrl_t kSooControl[17];
+
+// Returns a pointer to a full byte followed by a sentinel byte.
+inline ctrl_t* SooControl() {
+ // Const must be cast away here; no uses of this function will actually write
+ // to it because it is only used for SOO iterators.
+ return const_cast<ctrl_t*>(kSooControl);
+}
+// Whether ctrl is from the SooControl array.
+inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
+
// Returns a pointer to a generation to use for an empty hashtable.
GenerationType* EmptyGeneration();
@@ -535,7 +568,37 @@ inline bool IsEmptyGeneration(const GenerationType* generation) {
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
+bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
+ const ctrl_t* ctrl);
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
+ ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
+ ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
+#if defined(NDEBUG)
+ return false;
+#else
+ return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
+#endif
+}
+
+// Returns the insert position for the given mask.
+// We want to add entropy even when ASLR is not enabled.
+// In debug builds we randomly insert at either the front or the back of
+// the group.
+// TODO(kfm,sbenza): revisit after we do unconditional mixing
+template <class Mask>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
+ Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
+ ABSL_ATTRIBUTE_UNUSED size_t hash,
+ ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
+#if defined(NDEBUG)
+ return mask.LowestBitSet();
+#else
+ return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
+ ? mask.HighestBitSet()
+ : mask.LowestBitSet();
+#endif
+}
// Returns a per-table hash salt, which changes on resize. This gets mixed into
// H1 to randomize iteration order per-table.
@@ -560,7 +623,12 @@ inline h2_t H2(size_t hash) { return hash & 0x7F; }
// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsFull(ctrl_t c) {
+ // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
+ // is not a value in the enum. Both ways are equivalent, but this way makes
+ // linters happier.
+ return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
+}
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
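
A quick sketch of the control-byte encoding these predicates rely on, assuming the usual swisstable values kEmpty = -128, kDeleted = -2, kSentinel = -1 (defined elsewhere in this header):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Full bytes are 0x00..0x7f (the H2 bits of the hash); the special
      // states all have the sign bit set.
      const int8_t kEmpty = -128, kDeleted = -2, kSentinel = -1;
      const uint64_t hash = 0x123456;
      const int8_t h2 = static_cast<int8_t>(hash & 0x7F);  // H2(hash) == 0x56
      std::printf("IsFull: %d %d\n", h2 >= 0, kEmpty >= 0);        // 1 0
      std::printf("IsEmptyOrDeleted: %d %d %d\n", kEmpty < kSentinel,
                  kDeleted < kSentinel, h2 < kSentinel);           // 1 1 0
    }
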
@@ -646,6 +714,14 @@ struct GroupSse2Impl {
static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
}
+  // Returns a bitmask representing the positions of non-full slots.
+  // Note: this includes kEmpty, kDeleted, and kSentinel.
+  // It is useful in contexts where kSentinel is not present.
+ auto MaskNonFull() const {
+ return BitMask<uint16_t, kWidth>(
+ static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
+ }
+
// Returns a bitmask representing the positions of empty or deleted slots.
NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
@@ -685,10 +761,11 @@ struct GroupAArch64Impl {
ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
}
- BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ auto Match(h2_t hash) const {
uint8x8_t dup = vdup_n_u8(hash);
auto mask = vceq_u8(ctrl, dup);
- return BitMask<uint64_t, kWidth, 3>(
+ return BitMask<uint64_t, kWidth, /*Shift=*/3,
+ /*NullifyBitsOnIteration=*/true>(
vget_lane_u64(vreinterpret_u64_u8(mask), 0));
}
@@ -704,12 +781,25 @@ struct GroupAArch64Impl {
// Returns a bitmask representing the positions of full slots.
// Note: for `is_small()` tables group may contain the "same" slot twice:
// original and mirrored.
- BitMask<uint64_t, kWidth, 3> MaskFull() const {
+ auto MaskFull() const {
uint64_t mask = vget_lane_u64(
vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
vdup_n_s8(static_cast<int8_t>(0)))),
0);
- return BitMask<uint64_t, kWidth, 3>(mask);
+ return BitMask<uint64_t, kWidth, /*Shift=*/3,
+ /*NullifyBitsOnIteration=*/true>(mask);
+ }
+
+  // Returns a bitmask representing the positions of non-full slots.
+  // Note: this includes kEmpty, kDeleted, and kSentinel.
+  // It is useful in contexts where kSentinel is not present.
+ auto MaskNonFull() const {
+ uint64_t mask = vget_lane_u64(
+ vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
+ vdup_n_s8(static_cast<int8_t>(0)))),
+ 0);
+ return BitMask<uint64_t, kWidth, /*Shift=*/3,
+ /*NullifyBitsOnIteration=*/true>(mask);
}
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
@@ -736,11 +826,10 @@ struct GroupAArch64Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
- constexpr uint64_t msbs = 0x8080808080808080ULL;
constexpr uint64_t slsbs = 0x0202020202020202ULL;
constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
auto x = slsbs & (mask >> 6);
- auto res = (x + midbs) | msbs;
+ auto res = (x + midbs) | kMsbs8Bytes;
little_endian::Store64(dst, res);
}
@@ -768,30 +857,33 @@ struct GroupPortableImpl {
// v = 0x1716151413121110
// hash = 0x12
// retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
- constexpr uint64_t msbs = 0x8080808080808080ULL;
constexpr uint64_t lsbs = 0x0101010101010101ULL;
auto x = ctrl ^ (lsbs * hash);
- return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
}
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
- msbs);
+ kMsbs8Bytes);
}
// Returns a bitmask representing the positions of full slots.
// Note: for `is_small()` tables group may contain the "same" slot twice:
// original and mirrored.
BitMask<uint64_t, kWidth, 3> MaskFull() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl ^ msbs) & msbs);
+ return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
+ }
+
+  // Returns a bitmask representing the positions of non-full slots.
+  // Note: this includes kEmpty, kDeleted, and kSentinel.
+  // It is useful in contexts where kSentinel is not present.
+ auto MaskNonFull() const {
+ return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
}
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
- msbs);
+ kMsbs8Bytes);
}
uint32_t CountLeadingEmptyOrDeleted() const {
@@ -803,9 +895,8 @@ struct GroupPortableImpl {
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl & msbs;
+ auto x = ctrl & kMsbs8Bytes;
auto res = (~x + (x >> 7)) & ~lsbs;
little_endian::Store64(dst, res);
}
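
The worked example in the GroupPortableImpl::Match comment above runs as-is; a minimal standalone version of the SWAR probe with the same constants and formula:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
      const uint64_t lsbs = 0x0101010101010101ULL;
      const uint64_t ctrl = 0x1716151413121110ULL;  // group bytes 0x10..0x17
      const uint64_t hash = 0x12;
      uint64_t x = ctrl ^ (lsbs * hash);  // zero byte wherever ctrl byte == hash
      std::printf("%016llx\n",
                  static_cast<unsigned long long>((x - lsbs) & ~x & kMsbs8Bytes));
      // Prints 0000000080800000: a true match at byte 2 plus a benign false
      // positive at byte 3 caused by the borrow out of byte 2; match
      // candidates are verified against the full key afterwards anyway.
    }
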
@@ -815,21 +906,21 @@ struct GroupPortableImpl {
#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
-using GroupEmptyOrDeleted = GroupSse2Impl;
+using GroupFullEmptyOrDeleted = GroupSse2Impl;
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
using Group = GroupAArch64Impl;
// For Aarch64, we use the portable implementation for counting and masking
-// empty or deleted group elements. This is to avoid the latency of moving
+// full, empty or deleted group elements. This is to avoid the latency of moving
// between data GPRs and Neon registers when it does not provide a benefit.
// Using Neon is profitable when we call Match(), but is not when we don't,
-// which is the case when we do *EmptyOrDeleted operations. It is difficult to
-// make a similar approach beneficial on other architectures such as x86 since
-// they have much lower GPR <-> vector register transfer latency and 16-wide
-// Groups.
-using GroupEmptyOrDeleted = GroupPortableImpl;
+// which is the case when we do *EmptyOrDeleted and MaskFull operations.
+// It is difficult to make a similar approach beneficial on other architectures
+// such as x86 since they have much lower GPR <-> vector register transfer
+// latency and 16-wide Groups.
+using GroupFullEmptyOrDeleted = GroupPortableImpl;
#else
using Group = GroupPortableImpl;
-using GroupEmptyOrDeleted = GroupPortableImpl;
+using GroupFullEmptyOrDeleted = GroupPortableImpl;
#endif
// When there is an insertion with no reserved growth, we rehash with
@@ -978,17 +1069,96 @@ using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
#endif
+// Stores information regarding the number of slots we can still fill
+// without needing to rehash.
+//
+// We want to ensure a sufficient number of empty slots in the table in order
+// to keep probe sequences relatively short. An empty slot in the probe group
+// is required to stop probing.
+//
+// Tombstones (kDeleted slots) are not included in the growth capacity,
+// because we'd like to rehash when the table is filled with tombstones and/or
+// full slots.
+//
+// GrowthInfo also stores a bit that encodes whether the table may have any
+// deleted slots.
+// Most tables (>95%) have no deleted slots, so some functions can
+// be more efficient with this information.
+//
+// Callers can also force a rehash via the standard `rehash(0)`,
+// which will recompute this value as a side-effect.
+//
+// See also `CapacityToGrowth()`.
+class GrowthInfo {
+ public:
+ // Leaves data member uninitialized.
+ GrowthInfo() = default;
+
+ // Initializes the GrowthInfo assuming we can grow `growth_left` elements
+ // and there are no kDeleted slots in the table.
+ void InitGrowthLeftNoDeleted(size_t growth_left) {
+ growth_left_info_ = growth_left;
+ }
+
+  // Overwrites a single full slot with an empty slot.
+ void OverwriteFullAsEmpty() { ++growth_left_info_; }
+
+  // Overwrites a single empty slot with a full slot.
+ void OverwriteEmptyAsFull() {
+ assert(GetGrowthLeft() > 0);
+ --growth_left_info_;
+ }
+
+ // Overwrites several empty slots with full slots.
+ void OverwriteManyEmptyAsFull(size_t cnt) {
+ assert(GetGrowthLeft() >= cnt);
+ growth_left_info_ -= cnt;
+ }
+
+  // Overwrites the specified control element with a full slot.
+ void OverwriteControlAsFull(ctrl_t ctrl) {
+ assert(GetGrowthLeft() >= static_cast<size_t>(IsEmpty(ctrl)));
+ growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
+ }
+
+  // Overwrites a single full slot with a deleted slot.
+ void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
+
+  // Returns true if the table satisfies two properties:
+  // 1. Guaranteed to have no kDeleted slots.
+  // 2. There is room to grow by at least one element.
+ bool HasNoDeletedAndGrowthLeft() const {
+ return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
+ }
+
+ // Returns true if the table satisfies two properties:
+ // 1. Guaranteed to have no kDeleted slots.
+ // 2. There is no growth left.
+ bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
+
+  // Returns true if the table is guaranteed to have no kDeleted slots.
+ bool HasNoDeleted() const {
+ return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
+ }
+
+ // Returns the number of elements left to grow.
+ size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
+
+ private:
+ static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
+ static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
+  // The topmost bit signals whether the table may have deleted slots.
+ size_t growth_left_info_;
+};
+
+static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
+static_assert(alignof(GrowthInfo) == alignof(size_t), "");
+
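
The encoding above packs growth_left and the deleted-slots flag into one word; a sketch of how the signed comparisons read it:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kGrowthLeftMask = ~uint64_t{0} >> 1;
      const uint64_t kDeletedBit = ~kGrowthLeftMask;  // the sign bit
      uint64_t info = 5;  // growth_left == 5, no deleted slots
      std::printf("%d\n", static_cast<int64_t>(info) > 0);  // 1: no deleted, growth left
      info |= kDeletedBit;                                  // OverwriteFullAsDeleted()
      std::printf("%d\n", static_cast<int64_t>(info) > 0);  // 0: may have tombstones
      std::printf("%llu\n", static_cast<unsigned long long>(
                                info & kGrowthLeftMask));   // 5: GetGrowthLeft()
    }
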
// Returns whether `n` is a valid capacity (i.e., number of slots).
//
// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-// Computes the offset from the start of the backing allocation of control.
-// infoz and growth_left are stored at the beginning of the backing array.
-inline size_t ControlOffset(bool has_infoz) {
- return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
-}
-
// Returns the number of "cloned control bytes".
//
// This is the number of control bytes that are present both at the beginning
@@ -996,36 +1166,157 @@ inline size_t ControlOffset(bool has_infoz) {
// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) of the generation counter (if it exists).
-inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return ControlOffset(has_infoz) + num_control_bytes;
+// Returns the number of control bytes including cloned.
+constexpr size_t NumControlBytes(size_t capacity) {
+ return capacity + 1 + NumClonedBytes();
}
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) at which the slots begin.
-inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
- assert(IsValidCapacity(capacity));
- return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
- slot_align - 1) &
- (~slot_align + 1);
+// Computes the offset from the start of the backing allocation of control.
+// infoz and growth_info are stored at the beginning of the backing array.
+inline static size_t ControlOffset(bool has_infoz) {
+ return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
}
-// Given the capacity of a table, computes the total size of the backing
-// array.
-inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
- bool has_infoz) {
- return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
-}
+// Helper class for computing offsets and allocation size of hash set fields.
+class RawHashSetLayout {
+ public:
+ explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
+ : capacity_(capacity),
+ control_offset_(ControlOffset(has_infoz)),
+ generation_offset_(control_offset_ + NumControlBytes(capacity)),
+ slot_offset_(
+ (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
+ (~slot_align + 1)) {
+ assert(IsValidCapacity(capacity));
+ }
+
+ // Returns the capacity of a table.
+ size_t capacity() const { return capacity_; }
+
+  // Returns the precomputed offset of the control bytes from the start of
+  // the backing allocation.
+ size_t control_offset() const { return control_offset_; }
+
+ // Given the capacity of a table, computes the offset (from the start of the
+ // backing allocation) of the generation counter (if it exists).
+ size_t generation_offset() const { return generation_offset_; }
+
+ // Given the capacity of a table, computes the offset (from the start of the
+ // backing allocation) at which the slots begin.
+ size_t slot_offset() const { return slot_offset_; }
+
+ // Given the capacity of a table, computes the total size of the backing
+ // array.
+ size_t alloc_size(size_t slot_size) const {
+ return slot_offset_ + capacity_ * slot_size;
+ }
+
+ private:
+ size_t capacity_;
+ size_t control_offset_;
+ size_t generation_offset_;
+ size_t slot_offset_;
+};
+
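
To make the offset arithmetic concrete, this sketch replays the constructor for one assumed configuration (capacity 15, 8-byte slots and alignment, Group::kWidth == 16, no infoz, generations disabled so NumGenerationBytes() == 0):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t capacity = 15, slot_size = 8, slot_align = 8;
      const std::size_t control_offset = sizeof(std::size_t);      // GrowthInfo: 8
      const std::size_t num_control = capacity + 1 + 15;           // + clones: 31
      const std::size_t generation_offset = control_offset + num_control;  // 39
      const std::size_t slot_offset =                              // align up: 40
          (generation_offset + 0 + slot_align - 1) & (~slot_align + 1);
      std::printf("%zu %zu\n", slot_offset,
                  slot_offset + capacity * slot_size);             // 40 160
    }
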
+struct HashtableFreeFunctionsAccess;
+
+// We only allow a maximum of 1 SOO element, which makes the implementation
+// much simpler. Complications with multiple SOO elements include:
+// - Satisfying the guarantee that erasing one element doesn't invalidate
+// iterators to other elements means we would probably need actual SOO
+// control bytes.
+// - In order to prevent user code from depending on iteration order for small
+// tables, we would need to randomize the iteration order somehow.
+constexpr size_t SooCapacity() { return 1; }
+// Sentinel type to indicate SOO CommonFields construction.
+struct soo_tag_t {};
+// Sentinel type to indicate SOO CommonFields construction with full size.
+struct full_soo_tag_t {};
+
+// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
+// thinks that the call to slot_array() in find_or_prepare_insert() is reading
+// uninitialized memory, but slot_array is only called there when the table is
+// non-empty and this memory is initialized when the table is non-empty.
+#if !defined(__clang__) && defined(__GNUC__)
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \
+ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
+ _Pragma("GCC diagnostic pop")
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
+#else
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
+#endif
+
+// This allows us to work around an uninitialized memory warning when
+// constructing begin() iterators in empty hashtables.
+union MaybeInitializedPtr {
+ void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
+ void set(void* ptr) { p = ptr; }
+
+ void* p;
+};
+
+struct HeapPtrs {
+ HeapPtrs() = default;
+ explicit HeapPtrs(ctrl_t* c) : control(c) {}
+
+ // The control bytes (and, also, a pointer near to the base of the backing
+ // array).
+ //
+ // This contains `capacity + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ //
+ // Note that growth_info is stored immediately before this pointer.
+ // May be uninitialized for SOO tables.
+ ctrl_t* control;
+
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `control`. May be uninitialized for empty tables.
+ // Note: we can't use `slots` because Qt defines "slots" as a macro.
+ MaybeInitializedPtr slot_array;
+};
+
+// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
+// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
+union HeapOrSoo {
+ HeapOrSoo() = default;
+ explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
+
+ ctrl_t*& control() {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
+ }
+ ctrl_t* control() const {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
+ }
+ MaybeInitializedPtr& slot_array() {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
+ }
+ MaybeInitializedPtr slot_array() const {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
+ }
+ void* get_soo_data() {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
+ }
+ const void* get_soo_data() const {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
+ }
+
+ HeapPtrs heap;
+ unsigned char soo_data[sizeof(HeapPtrs)];
+};
// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
class CommonFields : public CommonFieldsGenerationInfo {
public:
- CommonFields() = default;
+ CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
+ explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
+ explicit CommonFields(full_soo_tag_t)
+ : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
// Not copyable
CommonFields(const CommonFields&) = delete;
@@ -1035,23 +1326,44 @@ class CommonFields : public CommonFieldsGenerationInfo {
CommonFields(CommonFields&& that) = default;
CommonFields& operator=(CommonFields&&) = default;
- ctrl_t* control() const { return control_; }
- void set_control(ctrl_t* c) { control_ = c; }
+ template <bool kSooEnabled>
+ static CommonFields CreateDefault() {
+ return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{};
+ }
+
+ // The inline data for SOO is written on top of control_/slots_.
+ const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
+ void* soo_data() { return heap_or_soo_.get_soo_data(); }
+
+ HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
+ const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
+
+ ctrl_t* control() const { return heap_or_soo_.control(); }
+ void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
void* backing_array_start() const {
- // growth_left (and maybe infoz) is stored before control bytes.
+ // growth_info (and maybe infoz) is stored before control bytes.
assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
return control() - ControlOffset(has_infoz());
}
// Note: we can't use slots() because Qt defines "slots" as a macro.
- void* slot_array() const { return slots_; }
- void set_slots(void* s) { slots_ = s; }
+ void* slot_array() const { return heap_or_soo_.slot_array().get(); }
+ MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); }
+ void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
// The number of filled slots.
size_t size() const { return size_ >> HasInfozShift(); }
void set_size(size_t s) {
size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
}
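
A sketch of the size_/has_infoz packing used by set_size() and increment_size(), assuming HasInfozShift() == 1:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t kShift = 1;  // assumed value of HasInfozShift()
      const std::size_t kMask = (std::size_t{1} << kShift) - 1;
      std::size_t size_ = kMask;     // has_infoz bit set, size 0
      size_ = (std::size_t{5} << kShift) | (size_ & kMask);  // set_size(5)
      size_ += std::size_t{1} << kShift;                     // increment_size()
      std::printf("%zu %zu\n", size_ >> kShift, size_ & kMask);  // 6 1
    }
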
+ void set_empty_soo() {
+ AssertInSooMode();
+ size_ = 0;
+ }
+ void set_full_soo() {
+ AssertInSooMode();
+ size_ = size_t{1} << HasInfozShift();
+ }
void increment_size() {
assert(size() < capacity());
size_ += size_t{1} << HasInfozShift();
@@ -1070,15 +1382,17 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The number of slots we can still fill without needing to rehash.
// This is stored in the heap allocation before the control bytes.
- size_t growth_left() const {
- const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
- assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
+ // TODO(b/289225379): experiment with moving growth_info back inline to
+ // increase room for SOO.
+ size_t growth_left() const { return growth_info().GetGrowthLeft(); }
+
+ GrowthInfo& growth_info() {
+ auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
+ assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
return *gl_ptr;
}
- void set_growth_left(size_t gl) {
- size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
- assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
- *gl_ptr = gl;
+ GrowthInfo growth_info() const {
+ return const_cast<CommonFields*>(this)->growth_info();
}
bool has_infoz() const {
@@ -1103,12 +1417,8 @@ class CommonFields : public CommonFieldsGenerationInfo {
should_rehash_for_bug_detection_on_insert(control(), capacity());
}
bool should_rehash_for_bug_detection_on_move() const {
- return CommonFieldsGenerationInfo::
- should_rehash_for_bug_detection_on_move(control(), capacity());
- }
- void maybe_increment_generation_on_move() {
- if (capacity() == 0) return;
- increment_generation();
+ return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
+ control(), capacity());
}
void reset_reserved_growth(size_t reservation) {
CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
@@ -1116,7 +1426,16 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The size of the backing array allocation.
size_t alloc_size(size_t slot_size, size_t slot_align) const {
- return AllocSize(capacity(), slot_size, slot_align, has_infoz());
+ return RawHashSetLayout(capacity(), slot_align, has_infoz())
+ .alloc_size(slot_size);
+ }
+
+ // Move fields other than heap_or_soo_.
+ void move_non_heap_or_soo_fields(CommonFields& that) {
+ static_cast<CommonFieldsGenerationInfo&>(*this) =
+ std::move(static_cast<CommonFieldsGenerationInfo&>(that));
+ capacity_ = that.capacity_;
+ size_ = that.size_;
}
// Returns the number of control bytes set to kDeleted. For testing only.
@@ -1132,21 +1451,12 @@ class CommonFields : public CommonFieldsGenerationInfo {
return (size_t{1} << HasInfozShift()) - 1;
}
- // TODO(b/182800944): Investigate removing some of these fields:
- // - control/slots can be derived from each other
-
- // The control bytes (and, also, a pointer near to the base of the backing
- // array).
- //
- // This contains `capacity + 1 + NumClonedBytes()` entries, even
- // when the table is empty (hence EmptyGroup).
- //
- // Note that growth_left is stored immediately before this pointer.
- ctrl_t* control_ = EmptyGroup();
-
- // The beginning of the slots, located at `SlotOffset()` bytes after
- // `control`. May be null for empty tables.
- void* slots_ = nullptr;
+ // We can't assert that SOO is enabled because we don't have SooEnabled(), but
+ // we assert what we can.
+ void AssertInSooMode() const {
+ assert(capacity() == SooCapacity());
+ assert(!has_infoz());
+ }
// The number of slots in the backing array. This is always 2^N-1 for an
// integer N. NOTE: we tried experimenting with compressing the capacity and
@@ -1154,10 +1464,16 @@ class CommonFields : public CommonFieldsGenerationInfo {
// power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
// size_ and storing size in the low bits. Both of these experiments were
// regressions, presumably because we need capacity to do find operations.
- size_t capacity_ = 0;
+ size_t capacity_;
// The size and also has one bit that stores whether we have infoz.
- size_t size_ = 0;
+ // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
+ // encode the size in SOO case. We would be making size()/capacity() more
+ // expensive in order to have more SOO space.
+ size_t size_;
+
+ // Either the control/slots pointers or the SOO slot.
+ HeapOrSoo heap_or_soo_;
};
template <class Policy, class Hash, class Eq, class Alloc>
@@ -1320,6 +1636,10 @@ inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
const void* const& slot_b) {
// If either control byte is null, then we can't tell.
if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
+ const bool a_is_soo = IsSooControl(ctrl_a);
+ if (a_is_soo != IsSooControl(ctrl_b)) return false;
+ if (a_is_soo) return slot_a == slot_b;
+
const void* low_slot = slot_a;
const void* hi_slot = slot_b;
if (ctrl_a > ctrl_b) {
@@ -1343,41 +1663,45 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
// - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
// - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
// the chances that the hot paths will be inlined.
+
+ // fail_if(is_invalid, message) crashes when is_invalid is true and provides
+ // an error message based on `message`.
+ const auto fail_if = [](bool is_invalid, const char* message) {
+ if (ABSL_PREDICT_FALSE(is_invalid)) {
+ ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
+ }
+ };
+
const bool a_is_default = ctrl_a == EmptyGroup();
const bool b_is_default = ctrl_b == EmptyGroup();
- if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
- ABSL_RAW_LOG(
- FATAL,
- "Invalid iterator comparison. Comparing default-constructed iterator "
- "with non-default-constructed iterator.");
- }
if (a_is_default && b_is_default) return;
+ fail_if(a_is_default != b_is_default,
+ "Comparing default-constructed hashtable iterator with a "
+ "non-default-constructed hashtable iterator.");
if (SwisstableGenerationsEnabled()) {
if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
+ // Users don't need to know whether the tables are SOO so don't mention SOO
+ // in the debug message.
+ const bool a_is_soo = IsSooControl(ctrl_a);
+ const bool b_is_soo = IsSooControl(ctrl_b);
+ fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
+ "Comparing iterators from different hashtables.");
+
const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
- if (a_is_empty != b_is_empty) {
- ABSL_RAW_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterator from a "
- "non-empty hashtable with an iterator from an empty "
- "hashtable.");
- }
- if (a_is_empty && b_is_empty) {
- ABSL_RAW_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterators from "
- "different empty hashtables.");
- }
+ fail_if(a_is_empty != b_is_empty,
+ "Comparing an iterator from an empty hashtable with an iterator "
+ "from a non-empty hashtable.");
+ fail_if(a_is_empty && b_is_empty,
+ "Comparing iterators from different empty hashtables.");
+
const bool a_is_end = ctrl_a == nullptr;
const bool b_is_end = ctrl_b == nullptr;
- if (a_is_end || b_is_end) {
- ABSL_RAW_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterator with an "
- "end() iterator from a different hashtable.");
- }
- ABSL_RAW_LOG(FATAL,
- "Invalid iterator comparison. Comparing non-end() iterators "
- "from different hashtables.");
+ fail_if(a_is_end || b_is_end,
+ "Comparing iterator with an end() iterator from a different "
+ "hashtable.");
+ fail_if(true, "Comparing non-end() iterators from different hashtables.");
} else {
ABSL_HARDENING_ASSERT(
AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
@@ -1432,20 +1756,17 @@ template <typename = void>
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
auto seq = probe(common, hash);
const ctrl_t* ctrl = common.control();
+ if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
+ !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
+ return {seq.offset(), /*probe_length=*/0};
+ }
while (true) {
- GroupEmptyOrDeleted g{ctrl + seq.offset()};
+ GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
auto mask = g.MaskEmptyOrDeleted();
if (mask) {
-#if !defined(NDEBUG)
- // We want to add entropy even when ASLR is not enabled.
- // In debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
- return {seq.offset(mask.HighestBitSet()), seq.index()};
- }
-#endif
- return {seq.offset(mask.LowestBitSet()), seq.index()};
+ return {
+ seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
+ seq.index()};
}
seq.next();
assert(seq.index() <= common.capacity() && "full table!");
@@ -1462,7 +1783,8 @@ extern template FindInfo find_first_non_full(const CommonFields&, size_t);
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
inline void ResetGrowthLeft(CommonFields& common) {
- common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
+ common.growth_info().InitGrowthLeftNoDeleted(
+ CapacityToGrowth(common.capacity()) - common.size());
}
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
@@ -1476,43 +1798,140 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) {
SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
}
-// Sets `ctrl[i]` to `h`.
-//
-// Unlike setting it directly, this function will perform bounds checks and
-// mirror the value to the cloned tail if necessary.
-inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
- size_t slot_size) {
- const size_t capacity = common.capacity();
- assert(i < capacity);
-
- auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
+// Sets sanitizer poisoning for the slot corresponding to the control byte
+// being set.
+inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
+ size_t slot_size) {
+ assert(i < c.capacity());
+ auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
if (IsFull(h)) {
SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
} else {
SanitizerPoisonMemoryRegion(slot_i, slot_size);
}
+}
- ctrl_t* ctrl = common.control();
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
+inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
+ size_t slot_size) {
+ DoSanitizeOnSetCtrl(c, i, h, slot_size);
+ ctrl_t* ctrl = c.control();
ctrl[i] = h;
- ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
+ ctrl[((i - NumClonedBytes()) & c.capacity()) +
+ (NumClonedBytes() & c.capacity())] = h;
+}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
+ SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
}
+// Like SetCtrl, but in a single-group table we can save some operations when
+// setting the cloned control byte.
+inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
+ size_t slot_size) {
+ assert(is_single_group(c.capacity()));
+ DoSanitizeOnSetCtrl(c, i, h, slot_size);
+ ctrl_t* ctrl = c.control();
+ ctrl[i] = h;
+ ctrl[i + c.capacity() + 1] = h;
+}
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
-inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
- size_t slot_size) {
- SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
+inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
+ size_t slot_size) {
+ SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
}
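
The mirroring expression in SetCtrl above is compact but opaque; this check (assuming Group::kWidth == 16, so NumClonedBytes() == 15) verifies that every valid index maps either to itself or to its clone at i + capacity + 1:

    #include <cstdio>

    int main() {
      const unsigned kNumClonedBytes = 15;  // Group::kWidth - 1
      for (unsigned capacity : {1u, 3u, 7u, 15u, 31u, 63u}) {
        for (unsigned i = 0; i < capacity; ++i) {
          unsigned mirror =
              ((i - kNumClonedBytes) & capacity) + (kNumClonedBytes & capacity);
          if (mirror != i && mirror != i + capacity + 1) {
            std::printf("unexpected: cap=%u i=%u -> %u\n", capacity, i, mirror);
            return 1;
          }
        }
      }
      std::printf("ok\n");  // every index maps to itself or to its clone
    }
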
-// growth_left (which is a size_t) is stored with the backing array.
+// growth_info (which is a size_t) is stored with the backing array.
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
- return (std::max)(align_of_slot, alignof(size_t));
+ return (std::max)(align_of_slot, alignof(GrowthInfo));
}
// Returns the address of the ith slot in slots where each slot occupies
// slot_size.
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
- return reinterpret_cast<void*>(reinterpret_cast<char*>(slot_array) +
- (slot * slot_size));
+ return static_cast<void*>(static_cast<char*>(slot_array) +
+ (slot * slot_size));
+}
+
+// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
+// No insertion into the table is allowed during the callback.
+// Erasure is allowed only for the element passed to the callback.
+template <class SlotType, class Callback>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
+ const CommonFields& c, SlotType* slot, Callback cb) {
+ const size_t cap = c.capacity();
+ const ctrl_t* ctrl = c.control();
+ if (is_small(cap)) {
+    // Mirrored/cloned control bytes in a small table are also located in the
+    // first group (starting from position 0). We take the group from position
+    // `capacity` in order to avoid duplicates.
+
+    // A small table's capacity fits into the portable group, where
+    // GroupPortableImpl::MaskFull is more efficient for
+    // capacity <= GroupPortableImpl::kWidth.
+ assert(cap <= GroupPortableImpl::kWidth &&
+ "unexpectedly large small capacity");
+ static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
+ "unexpected group width");
+    // The group starts from the kSentinel slot, so indices in the mask are
+    // increased by 1.
+ const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
+ --ctrl;
+ --slot;
+ for (uint32_t i : mask) {
+ cb(ctrl + i, slot + i);
+ }
+ return;
+ }
+ size_t remaining = c.size();
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
+ while (remaining != 0) {
+ for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
+ assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
+ cb(ctrl + i, slot + i);
+ --remaining;
+ }
+ ctrl += Group::kWidth;
+ slot += Group::kWidth;
+ assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
+ "hash table was modified unexpectedly");
+ }
+ // NOTE: erasure of the current element is allowed in callback for
+ // absl::erase_if specialization. So we use `>=`.
+ assert(original_size_for_assert >= c.size() &&
+ "hash table was modified unexpectedly");
+}
+
+template <typename CharAlloc>
+constexpr bool ShouldSampleHashtablezInfo() {
+ // Folks with custom allocators often make unwarranted assumptions about the
+ // behavior of their classes vis-a-vis trivial destructability and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ return std::is_same<CharAlloc, std::allocator<char>>::value;
+}
+
+template <bool kSooEnabled>
+HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
+ size_t sizeof_value,
+ size_t old_capacity, bool was_soo,
+ HashtablezInfoHandle forced_infoz,
+ CommonFields& c) {
+ if (forced_infoz.IsSampled()) return forced_infoz;
+  // In SOO, we sample on the first insertion, so if this is an empty SOO case
+  // (e.g. when reserve is called), we still need to sample.
+ if (kSooEnabled && was_soo && c.size() == 0) {
+ return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
+ }
+ // For non-SOO cases, we sample whenever the capacity is increasing from zero
+ // to non-zero.
+ if (!kSooEnabled && old_capacity == 0) {
+ return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
+ }
+ return c.infoz();
}
// Helper class to perform resize of the hash set.
@@ -1521,17 +1940,21 @@ inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
// See GrowIntoSingleGroupShuffleControlBytes for details.
class HashSetResizeHelper {
public:
- explicit HashSetResizeHelper(CommonFields& c)
- : old_ctrl_(c.control()),
- old_capacity_(c.capacity()),
- had_infoz_(c.has_infoz()) {}
-
- // Optimized for small groups version of `find_first_non_full` applicable
- // only right after calling `raw_hash_set::resize`.
+ explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
+ HashtablezInfoHandle forced_infoz)
+ : old_capacity_(c.capacity()),
+ had_infoz_(c.has_infoz()),
+ was_soo_(was_soo),
+ had_soo_slot_(had_soo_slot),
+ forced_infoz_(forced_infoz) {}
+
+  // A version of `find_first_non_full` optimized for small groups.
+  // Beneficial only right after calling `raw_hash_set::resize`.
+  // It is safe to call when the capacity is big or was not changed, but there
+  // will be no performance benefit.
// It has implicit assumption that `resize` will call
// `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
- // Falls back to `find_first_non_full` in case of big groups, so it is
- // safe to use after `rehash_and_grow_if_necessary`.
+ // Falls back to `find_first_non_full` in case of big groups.
static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
size_t old_capacity,
size_t hash) {
@@ -1553,14 +1976,30 @@ class HashSetResizeHelper {
return FindInfo{offset, 0};
}
- ctrl_t* old_ctrl() const { return old_ctrl_; }
+ HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
+ void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
+ ctrl_t* old_ctrl() const {
+ assert(!was_soo_);
+ return old_heap_or_soo_.control();
+ }
+ void* old_slots() const {
+ assert(!was_soo_);
+ return old_heap_or_soo_.slot_array().get();
+ }
size_t old_capacity() const { return old_capacity_; }
+ // Returns the index of the SOO slot when growing from SOO to non-SOO in a
+ // single group. See also InitControlBytesAfterSoo(). It's important to use
+ // index 1 so that when resizing from capacity 1 to 3, we can still have
+ // random iteration order between the first two inserted elements.
+ // I.e. it allows inserting the second element at either index 0 or 2.
+ static size_t SooSlotIndex() { return 1; }
+
// Allocates a backing array for the hashtable.
// Reads `capacity` and updates all other fields based on the result of
// the allocation.
//
- // It also may do the folowing actions:
+ // It also may do the following actions:
// 1. initialize control bytes
// 2. initialize slots
// 3. deallocate old slots.
@@ -1590,45 +2029,45 @@ class HashSetResizeHelper {
//
// Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
- size_t AlignOfSlot>
- ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots,
- Alloc alloc) {
+ bool SooEnabled, size_t AlignOfSlot>
+ ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
+ ctrl_t soo_slot_h2,
+ size_t key_size,
+ size_t value_size) {
assert(c.capacity());
- // Folks with custom allocators often make unwarranted assumptions about the
- // behavior of their classes vis-a-vis trivial destructability and what
- // calls they will or won't make. Avoid sampling for people with custom
- // allocators to get us out of this mess. This is not a hard guarantee but
- // a workaround while we plan the exact guarantee we want to provide.
- const size_t sample_size =
- (std::is_same<Alloc, std::allocator<char>>::value &&
- c.slot_array() == nullptr)
- ? SizeOfSlot
- : 0;
HashtablezInfoHandle infoz =
- sample_size > 0 ? Sample(sample_size) : c.infoz();
+ ShouldSampleHashtablezInfo<Alloc>()
+ ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
+ old_capacity_, was_soo_,
+ forced_infoz_, c)
+ : HashtablezInfoHandle{};
const bool has_infoz = infoz.IsSampled();
- const size_t cap = c.capacity();
- const size_t alloc_size =
- AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
- char* mem = static_cast<char*>(
- Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
+ RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
+ char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
+ &alloc, layout.alloc_size(SizeOfSlot)));
const GenerationType old_generation = c.generation();
- c.set_generation_ptr(reinterpret_cast<GenerationType*>(
- mem + GenerationOffset(cap, has_infoz)));
+ c.set_generation_ptr(
+ reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
c.set_generation(NextGeneration(old_generation));
- c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
- c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
+ c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
+ c.set_slots(mem + layout.slot_offset());
ResetGrowthLeft(c);
const bool grow_single_group =
- IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity());
- if (old_capacity_ != 0 && grow_single_group) {
+ IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
+ if (SooEnabled && was_soo_ && grow_single_group) {
+ InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
+ if (TransferUsesMemcpy && had_soo_slot_) {
+ TransferSlotAfterSoo(c, SizeOfSlot);
+ }
+ // SooEnabled implies that old_capacity_ != 0.
+ } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
if (TransferUsesMemcpy) {
- GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot);
- DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots);
+ GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
+ DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
} else {
- GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
+ GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
}
} else {
ResetCtrl(c, SizeOfSlot);
@@ -1636,8 +2075,8 @@ class HashSetResizeHelper {
c.set_has_infoz(has_infoz);
if (has_infoz) {
- infoz.RecordStorageChanged(c.size(), cap);
- if (grow_single_group || old_capacity_ == 0) {
+ infoz.RecordStorageChanged(c.size(), layout.capacity());
+ if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
infoz.RecordRehash(0);
}
c.set_infoz(infoz);
@@ -1651,21 +2090,22 @@ class HashSetResizeHelper {
// PRECONDITIONS:
// 1. GrowIntoSingleGroupShuffleControlBytes was already called.
template <class PolicyTraits, class Alloc>
- void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref,
- typename PolicyTraits::slot_type* old_slots) {
+ void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
assert(old_capacity_ < Group::kWidth / 2);
assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
using slot_type = typename PolicyTraits::slot_type;
assert(is_single_group(c.capacity()));
- auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
+ auto* new_slots = static_cast<slot_type*>(c.slot_array());
+ auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
size_t shuffle_bit = old_capacity_ / 2 + 1;
for (size_t i = 0; i < old_capacity_; ++i) {
- if (IsFull(old_ctrl_[i])) {
+ if (IsFull(old_ctrl()[i])) {
size_t new_i = i ^ shuffle_bit;
SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i);
+ PolicyTraits::transfer(&alloc_ref, new_slots + new_i,
+ old_slots_ptr + i);
}
}
PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
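    // Editor's note -- a hedged worked example of the shuffle above: with
    // old_capacity_ == 3, shuffle_bit == 3 / 2 + 1 == 2, so full old slots
    // map as 0 -> 2, 1 -> 3, 2 -> 0 via `i ^ shuffle_bit`, consistent (per
    // the precondition) with GrowIntoSingleGroupShuffleControlBytes.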
@@ -1673,11 +2113,12 @@ class HashSetResizeHelper {
// Deallocates old backing array.
template <size_t AlignOfSlot, class CharAlloc>
- void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) {
- SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
+ void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
+ SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
+ auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
Deallocate<BackingArrayAlignment(AlignOfSlot)>(
- &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_),
- AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_));
+ &alloc_ref, old_ctrl() - layout.control_offset(),
+ layout.alloc_size(slot_size));
}
private:
@@ -1692,8 +2133,12 @@ class HashSetResizeHelper {
// Relocates control bytes and slots into new single group for
// transferable objects.
// Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
- void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
- size_t slot_size);
+ void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
+
+ // If there was an SOO slot and slots are transferable, transfers the SOO slot
+ // into the new heap allocation. Must be called only if
+ // IsGrowingIntoSingleGroupApplicable returned true.
+ void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
// Shuffle control bits deterministically to the next capacity.
// Returns offset for newly added element with given hash.
@@ -1726,6 +2171,13 @@ class HashSetResizeHelper {
void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
size_t new_capacity) const;
+ // If the table was SOO, initializes new control bytes. `h2` is the control
+ // byte corresponding to the full slot. Must be called only if
+ // IsGrowingIntoSingleGroupApplicable returned true.
+ // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
+ void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
+ size_t new_capacity);
+
// Shuffle trivially transferable slots in the way consistent with
// GrowIntoSingleGroupShuffleControlBytes.
//
@@ -1739,8 +2191,7 @@ class HashSetResizeHelper {
// 1. new_slots are transferred from old_slots_ consistent with
// GrowIntoSingleGroupShuffleControlBytes.
// 2. Empty new_slots are *not* poisoned.
- void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
- void* new_slots,
+ void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
size_t slot_size) const;
// Poison empty slots that were transferred using the deterministic algorithm
@@ -1760,11 +2211,24 @@ class HashSetResizeHelper {
}
}
- ctrl_t* old_ctrl_;
+ HeapOrSoo old_heap_or_soo_;
size_t old_capacity_;
bool had_infoz_;
+ bool was_soo_;
+ bool had_soo_slot_;
+ // Either null infoz or a pre-sampled forced infoz for SOO tables.
+ HashtablezInfoHandle forced_infoz_;
};
+inline void PrepareInsertCommon(CommonFields& common) {
+ common.increment_size();
+ common.maybe_increment_generation_on_insert();
+}
+
+// Like prepare_insert, but for the case of inserting into a full SOO table.
+size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
+ CommonFields& common);
+
// PolicyFunctions bundles together some information for a particular
// raw_hash_set<T, ...> instantiation. This information is passed to
// type-erased functions that want to do small amounts of type-specific
@@ -1772,21 +2236,29 @@ class HashSetResizeHelper {
struct PolicyFunctions {
size_t slot_size;
+ // Returns the pointer to the hash function stored in the set.
+ const void* (*hash_fn)(const CommonFields& common);
+
// Returns the hash of the pointed-to slot.
- size_t (*hash_slot)(void* set, void* slot);
+ size_t (*hash_slot)(const void* hash_fn, void* slot);
- // Transfer the contents of src_slot to dst_slot.
+ // Transfers the contents of src_slot to dst_slot.
void (*transfer)(void* set, void* dst_slot, void* src_slot);
- // Deallocate the backing store from common.
+ // Deallocates the backing store from common.
void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
+
+ // Resizes set to the new capacity.
+ // Arguments are used as in raw_hash_set::resize_impl.
+ void (*resize)(CommonFields& common, size_t new_capacity,
+ HashtablezInfoHandle forced_infoz);
};
// ClearBackingArray clears the backing array, either modifying it in place,
// or creating a new one based on the value of "reuse".
// REQUIRES: c.capacity > 0
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
- bool reuse);
+ bool reuse, bool soo_enabled);
// Type-erased version of raw_hash_set::erase_meta_only.
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
@@ -1817,9 +2289,26 @@ ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
memcpy(dst, src, SizeOfSlot);
}
-// Type-erased version of raw_hash_set::drop_deletes_without_resize.
-void DropDeletesWithoutResize(CommonFields& common,
- const PolicyFunctions& policy, void* tmp_space);
+// Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
+const void* GetHashRefForEmptyHasher(const CommonFields& common);
+
+// Given the hash of a value not currently in the table and the first empty
+// slot in the probe sequence, finds a viable slot index to insert it at.
+//
+// In case there's no space left, the table can be resized or rehashed
+// (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
+//
+// If there are no deleted slots and growth_left is positive, the element
+// can be inserted at the provided `target` position.
+//
+// When the table has deleted slots (according to GrowthInfo), the target
+// position will be searched one more time using `find_first_non_full`.
+//
+// REQUIRES: Table is not SOO.
+// REQUIRES: At least one non-full slot available.
+// REQUIRES: `target` is a valid empty position to insert.
+size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
+ const PolicyFunctions& policy);
// A SwissTable.
//
@@ -1875,6 +2364,26 @@ class raw_hash_set {
using key_arg = typename KeyArgImpl::template type<K, key_type>;
private:
+ // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
+ // after CommonFields to allow inlining larger slot_types (e.g. std::string),
+ // but it's a bit complicated if we want to support incomplete mapped_type in
+ // flat_hash_map. We could potentially do this for flat_hash_set and for an
+ // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
+ // types, strings, cords, and pairs/tuples of allowlisted types.
+ constexpr static bool SooEnabled() {
+ return PolicyTraits::soo_enabled() &&
+ sizeof(slot_type) <= sizeof(HeapOrSoo) &&
+ alignof(slot_type) <= alignof(HeapOrSoo);
+ }
+
+ // Whether `size` fits in the SOO capacity of this table.
+ bool fits_in_soo(size_t size) const {
+ return SooEnabled() && size <= SooCapacity();
+ }
+ // Whether this table is in SOO mode or non-SOO mode.
+ bool is_soo() const { return fits_in_soo(capacity()); }
+ bool is_full_soo() const { return is_soo() && !empty(); }
+
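+  // Editor's note -- a hedged sketch of the SOO lifecycle implied by the
+  // helpers above, assuming an element type for which SooEnabled() holds
+  // (SooCapacity() is apparently 1 in this patch):
+  //   absl::flat_hash_set<int> s;  // is_soo(): capacity() == SooCapacity()
+  //   s.insert(1);                 // is_full_soo(): the element lives inline
+  //   s.insert(2);                 // !fits_in_soo(2): the table resizes onto
+  //                                // the heap and leaves SOO mode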
// Give an early error when key_type is not hashable/eq.
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
@@ -1928,6 +2437,7 @@ class raw_hash_set {
class iterator : private HashSetIteratorGenerationInfo {
friend class raw_hash_set;
+ friend struct HashtableFreeFunctionsAccess;
public:
using iterator_category = std::forward_iterator_tag;
@@ -1958,6 +2468,7 @@ class raw_hash_set {
++ctrl_;
++slot_;
skip_empty_or_deleted();
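+      // Editor's note: this sentinel check moved here from
+      // skip_empty_or_deleted() (see the change below), so that
+      // skip_empty_or_deleted() now stops at the sentinel instead of
+      // nulling ctrl_ itself.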
+ if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
return *this;
}
// PRECONDITION: not an end() iterator.
@@ -1988,22 +2499,31 @@ class raw_hash_set {
// not equal to any end iterator.
ABSL_ASSUME(ctrl != nullptr);
}
+ // This constructor is used in begin() to avoid an MSan
+ // use-of-uninitialized-value error. Delegating from this constructor to
+ // the previous one doesn't avoid the error.
+ iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
+ const GenerationType* generation_ptr)
+ : HashSetIteratorGenerationInfo(generation_ptr),
+ ctrl_(ctrl),
+ slot_(to_slot(slot.get())) {
+ // This assumption helps the compiler know that any non-end iterator is
+ // not equal to any end iterator.
+ ABSL_ASSUME(ctrl != nullptr);
+ }
// For end() iterators.
explicit iterator(const GenerationType* generation_ptr)
: HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
- // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until
- // they reach one.
- //
- // If a sentinel is reached, we null `ctrl_` out instead.
+    // Fixes up `ctrl_` to point to a full slot or the sentinel by advancing
+    // `ctrl_` and `slot_` until one is reached.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift =
- GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
+ GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
- if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
ctrl_t* control() const { return ctrl_; }
@@ -2091,8 +2611,9 @@ class raw_hash_set {
size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : settings_(CommonFields{}, hash, eq, alloc) {
- if (bucket_count) {
+ : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
+ alloc) {
+ if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) {
resize(NormalizeCapacity(bucket_count));
}
}
@@ -2193,22 +2714,69 @@ class raw_hash_set {
that.alloc_ref())) {}
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
- : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+ : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
+ that.eq_ref(), a) {
const size_t size = that.size();
- if (size == 0) return;
- reserve(size);
- // Because the table is guaranteed to be empty, we can do something faster
- // than a full `insert`.
- for (const auto& v : that) {
- const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full_outofline(common(), hash);
- SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
- emplace_at(target.offset, v);
- common().maybe_increment_generation_on_insert();
- infoz().RecordInsert(hash, target.probe_length);
+ if (size == 0) {
+ return;
+ }
+ // We don't use `that.is_soo()` here because `that` can have non-SOO
+ // capacity but have a size that fits into SOO capacity.
+ if (fits_in_soo(size)) {
+ assert(size == 1);
+ common().set_full_soo();
+ emplace_at(soo_iterator(), *that.begin());
+ const HashtablezInfoHandle infoz = try_sample_soo();
+ if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
+ return;
+ }
+ assert(!that.is_soo());
+ const size_t cap = capacity();
+    // Note about single group tables:
+    // 1. It is correct to have any order of elements.
+    // 2. Order has to be non-deterministic.
+    // 3. We are assigning elements with arbitrary `shift` starting from
+    //    `capacity + shift` position.
+    // 4. `shift` must be coprime with `capacity + 1` in order to be able to
+    //    use modular arithmetic to traverse all positions, instead of cycling
+    //    through a subset of positions. Odd numbers are coprime with any
+    //    `capacity + 1` (2^N).
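+    // Editor's note -- a minimal sketch (not part of the patch) of why an
+    // odd `shift` visits every slot: an odd stride is a full cycle modulo a
+    // power of two. With cap == 7 (so cap + 1 == 8) and shift == 5:
+    //   size_t offset = cap;
+    //   for (int i = 0; i < 8; ++i) offset = (offset + 5) & cap;
+    //   // visits 4, 1, 6, 3, 0, 5, 2, 7 -- each position exactly once.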
+ size_t offset = cap;
+ const size_t shift =
+ is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
+ IterateOverFullSlots(
+ that.common(), that.slot_array(),
+ [&](const ctrl_t* that_ctrl,
+ slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
+ if (shift == 0) {
+          // Big table case: the position must be searched for via probing.
+          // The table is guaranteed to be empty, so we can do this faster
+          // than a full `insert`.
+ const size_t hash = PolicyTraits::apply(
+ HashElement{hash_ref()}, PolicyTraits::element(that_slot));
+ FindInfo target = find_first_non_full_outofline(common(), hash);
+ infoz().RecordInsert(hash, target.probe_length);
+ offset = target.offset;
+ } else {
+          // Small table case: the next position is computed via the shift.
+ offset = (offset + shift) & cap;
+ }
+ const h2_t h2 = static_cast<h2_t>(*that_ctrl);
+        assert(  // We rely on the hash not changing for small tables.
+ H2(PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(that_slot))) == h2 &&
+ "hash function value changed unexpectedly during the copy");
+ SetCtrl(common(), offset, h2, sizeof(slot_type));
+ emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
+ common().maybe_increment_generation_on_insert();
+ });
+ if (shift != 0) {
+      // On small table copy we do not record individual inserts:
+      // RecordInsert requires the hash, which is unknown for small tables.
+ infoz().RecordStorageChanged(size, cap);
}
common().set_size(size);
- set_growth_left(growth_left() - size);
+ growth_info().OverwriteManyEmptyAsFull(size);
}
ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
@@ -2220,16 +2788,22 @@ class raw_hash_set {
// would create a nullptr functor that cannot be called.
// TODO(b/296061262): move instead of copying hash/eq/alloc.
// Note: we avoid using exchange for better generated code.
- settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
- that.alloc_ref()) {
- that.common() = CommonFields{};
+ settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
+ ? std::move(that.common())
+ : CommonFields{full_soo_tag_t{}},
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
+ if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
+ transfer(soo_slot(), that.soo_slot());
+ }
+ that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move();
}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
- : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
+ : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
+ that.eq_ref(), a) {
if (a == that.alloc_ref()) {
- std::swap(common(), that.common());
+ swap_common(that);
maybe_increment_generation_or_rehash_on_move();
} else {
move_elements_allocs_unequal(std::move(that));
@@ -2264,8 +2838,12 @@ class raw_hash_set {
~raw_hash_set() { destructor_impl(); }
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto it = iterator_at(0);
+ if (ABSL_PREDICT_FALSE(empty())) return end();
+ if (is_soo()) return soo_iterator();
+ iterator it = {control(), common().slots_union(),
+ common().generation_ptr()};
it.skip_empty_or_deleted();
+ assert(IsFull(*it.control()));
return it;
}
iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
@@ -2285,7 +2863,14 @@ class raw_hash_set {
bool empty() const { return !size(); }
size_t size() const { return common().size(); }
- size_t capacity() const { return common().capacity(); }
+ size_t capacity() const {
+ const size_t cap = common().capacity();
+    // The compiler complains when functions are used in ABSL_ASSUME, so we
+    // use local variables.
+ ABSL_ATTRIBUTE_UNUSED static constexpr bool kEnabled = SooEnabled();
+ ABSL_ATTRIBUTE_UNUSED static constexpr size_t kCapacity = SooCapacity();
+ ABSL_ASSUME(!kEnabled || cap >= kCapacity);
+ return cap;
+ }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -2299,9 +2884,13 @@ class raw_hash_set {
const size_t cap = capacity();
if (cap == 0) {
// Already guaranteed to be empty; so nothing to do.
+ } else if (is_soo()) {
+ if (!empty()) destroy(soo_slot());
+ common().set_empty_soo();
} else {
destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
+ SooEnabled());
}
common().set_reserved_growth(0);
common().set_reservation_size(0);
@@ -2432,7 +3021,7 @@ class raw_hash_set {
std::pair<iterator, bool> emplace(Args&&... args)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
- slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+ slot_type* slot = to_slot(&raw);
construct(slot, std::forward<Args>(args)...);
const auto& elem = PolicyTraits::element(slot);
@@ -2496,11 +3085,11 @@ class raw_hash_set {
F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = find_or_prepare_insert(key);
if (res.second) {
- slot_type* slot = slot_array() + res.first;
+ slot_type* slot = res.first.slot();
std::forward<F>(f)(constructor(&alloc_ref(), &slot));
assert(!slot);
}
- return iterator_at(res.first);
+ return res.first;
}
// Extension API: support for heterogeneous keys.
@@ -2524,7 +3113,7 @@ class raw_hash_set {
// this method returns void to reduce algorithmic complexity to O(1). The
// iterator is invalidated, so any increment should be done before calling
// erase. In order to erase while iterating across a map, use the following
- // idiom (which also works for standard containers):
+ // idiom (which also works for some standard containers):
//
// for (auto it = m.begin(), end = m.end(); it != end;) {
// // `erase()` will invalidate `it`, so advance `it` first.
@@ -2540,7 +3129,11 @@ class raw_hash_set {
void erase(iterator it) {
AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
destroy(it.slot());
- erase_meta_only(it);
+ if (is_soo()) {
+ common().set_empty_soo();
+ } else {
+ erase_meta_only(it);
+ }
}
iterator erase(const_iterator first,
@@ -2548,12 +3141,19 @@ class raw_hash_set {
// We check for empty first because ClearBackingArray requires that
// capacity() > 0 as a precondition.
if (empty()) return end();
+ if (first == last) return last.inner_;
+ if (is_soo()) {
+ destroy(soo_slot());
+ common().set_empty_soo();
+ return end();
+ }
if (first == begin() && last == end()) {
// TODO(ezb): we access control bytes in destroy_slots so it could make
// sense to combine destroy_slots and ClearBackingArray to avoid cache
// misses when the table is large. Note that we also do this in clear().
destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
+ SooEnabled());
common().set_reserved_growth(common().reservation_size());
return end();
}
@@ -2568,13 +3168,21 @@ class raw_hash_set {
template <typename H, typename E>
void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
assert(this != &src);
+ // Returns whether insertion took place.
+ const auto insert_slot = [this](slot_type* src_slot) {
+ return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
+ PolicyTraits::element(src_slot))
+ .second;
+ };
+
+ if (src.is_soo()) {
+ if (src.empty()) return;
+ if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
+ return;
+ }
for (auto it = src.begin(), e = src.end(); it != e;) {
auto next = std::next(it);
- if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
- PolicyTraits::element(it.slot()))
- .second) {
- src.erase_meta_only(it);
- }
+ if (insert_slot(it.slot())) src.erase_meta_only(it);
it = next;
}
}
@@ -2588,7 +3196,11 @@ class raw_hash_set {
AssertIsFull(position.control(), position.inner_.generation(),
position.inner_.generation_ptr(), "extract()");
auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
- erase_meta_only(position);
+ if (is_soo()) {
+ common().set_empty_soo();
+ } else {
+ erase_meta_only(position);
+ }
return node;
}
@@ -2605,7 +3217,7 @@ class raw_hash_set {
IsNoThrowSwappable<allocator_type>(
typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
- swap(common(), that.common());
+ swap_common(that);
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
SwapAlloc(alloc_ref(), that.alloc_ref(),
@@ -2613,17 +3225,41 @@ class raw_hash_set {
}
void rehash(size_t n) {
- if (n == 0 && capacity() == 0) return;
- if (n == 0 && size() == 0) {
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
- return;
+ const size_t cap = capacity();
+ if (n == 0) {
+ if (cap == 0 || is_soo()) return;
+ if (empty()) {
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
+ SooEnabled());
+ return;
+ }
+ if (fits_in_soo(size())) {
+ // When the table is already sampled, we keep it sampled.
+ if (infoz().IsSampled()) {
+ const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
+ if (capacity() > kInitialSampledCapacity) {
+ resize(kInitialSampledCapacity);
+ }
+ // This asserts that we didn't lose sampling coverage in `resize`.
+ assert(infoz().IsSampled());
+ return;
+ }
+ alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
+ slot_type* tmp_slot = to_slot(slot_space);
+ transfer(tmp_slot, begin().slot());
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
+ SooEnabled());
+ transfer(soo_slot(), tmp_slot);
+ common().set_full_soo();
+ return;
+ }
}
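    // Editor's note -- a hedged sketch of the observable effect of the new
    // n == 0 path above for an SOO-eligible, unsampled table:
    //   absl::flat_hash_set<int> s;
    //   s.reserve(100);  // heap-backed
    //   s.insert(1);
    //   s.rehash(0);     // size() fits in SOO: the backing array is freed
    //                    // and the element is transferred back inline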
// bitor is a faster way of doing `max` here. We will round up to the next
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
- if (n == 0 || m > capacity()) {
+ if (n == 0 || m > cap) {
resize(m);
// This is after resize, to ensure that we have completed the allocation
@@ -2633,7 +3269,9 @@ class raw_hash_set {
}
void reserve(size_t n) {
- if (n > size() + growth_left()) {
+ const size_t max_size_before_growth =
+ is_soo() ? SooCapacity() : size() + growth_left();
+ if (n > max_size_before_growth) {
size_t m = GrowthToLowerboundCapacity(n);
resize(NormalizeCapacity(m));
@@ -2666,6 +3304,7 @@ class raw_hash_set {
// specific benchmarks indicating its importance.
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
+ if (SooEnabled() ? is_soo() : capacity() == 0) return;
(void)key;
// Avoid probing if we won't be able to prefetch the addresses received.
#ifdef ABSL_HAVE_PREFETCH
@@ -2686,26 +3325,16 @@ class raw_hash_set {
template <class K = key_type>
iterator find(const key_arg<K>& key,
size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto seq = probe(common(), hash);
- slot_type* slot_ptr = slot_array();
- const ctrl_t* ctrl = control();
- while (true) {
- Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
- EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slot_ptr + seq.offset(i)))))
- return iterator_at(seq.offset(i));
- }
- if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
- seq.next();
- assert(seq.index() <= capacity() && "full table!");
- }
+ AssertHashEqConsistent(key);
+ if (is_soo()) return find_soo(key);
+ return find_non_soo(key, hash);
}
template <class K = key_type>
iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ AssertHashEqConsistent(key);
+ if (is_soo()) return find_soo(key);
prefetch_heap_block();
- return find(key, hash_ref()(key));
+ return find_non_soo(key, hash_ref()(key));
}
template <class K = key_type>
@@ -2716,8 +3345,7 @@ class raw_hash_set {
template <class K = key_type>
const_iterator find(const key_arg<K>& key) const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
- prefetch_heap_block();
- return find(key, hash_ref()(key));
+ return const_cast<raw_hash_set*>(this)->find(key);
}
template <class K = key_type>
@@ -2791,6 +3419,8 @@ class raw_hash_set {
friend struct absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess;
+ friend struct absl::container_internal::HashtableFreeFunctionsAccess;
+
struct FindElement {
template <class K, class... Args>
const_iterator operator()(const K& key, Args&&...) const {
@@ -2824,7 +3454,7 @@ class raw_hash_set {
if (res.second) {
s.emplace_at(res.first, std::forward<Args>(args)...);
}
- return {s.iterator_at(res.first), res.second};
+ return res;
}
raw_hash_set& s;
};
@@ -2835,11 +3465,11 @@ class raw_hash_set {
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
auto res = s.find_or_prepare_insert(key);
if (res.second) {
- s.transfer(s.slot_array() + res.first, &slot);
+ s.transfer(res.first.slot(), &slot);
} else if (do_destroy) {
s.destroy(&slot);
}
- return {s.iterator_at(res.first), res.second};
+ return res;
}
raw_hash_set& s;
// Constructed slot. Either moved into place or destroyed.
@@ -2858,17 +3488,55 @@ class raw_hash_set {
PolicyTraits::transfer(&alloc_ref(), to, from);
}
- inline void destroy_slots() {
- const size_t cap = capacity();
+ // TODO(b/289225379): consider having a helper class that has the impls for
+ // SOO functionality.
+ template <class K = key_type>
+ iterator find_soo(const key_arg<K>& key) {
+ assert(is_soo());
+ return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(soo_slot()))
+ ? end()
+ : soo_iterator();
+ }
+
+ template <class K = key_type>
+ iterator find_non_soo(const key_arg<K>& key, size_t hash) {
+ assert(!is_soo());
+ auto seq = probe(common(), hash);
const ctrl_t* ctrl = control();
- slot_type* slot = slot_array();
- for (size_t i = 0; i != cap; ++i) {
- if (IsFull(ctrl[i])) {
- destroy(slot + i);
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ for (uint32_t i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slot_array() + seq.offset(i)))))
+ return iterator_at(seq.offset(i));
}
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
+ seq.next();
+ assert(seq.index() <= capacity() && "full table!");
}
}
+ // Conditionally samples hashtablez for SOO tables. This should be called on
+ // insertion into an empty SOO table and in copy construction when the size
+ // can fit in SOO capacity.
+ inline HashtablezInfoHandle try_sample_soo() {
+ assert(is_soo());
+ if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
+ return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
+ SooCapacity());
+ }
+
+ inline void destroy_slots() {
+ assert(!is_soo());
+ if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
+ IterateOverFullSlots(
+ common(), slot_array(),
+ [&](const ctrl_t*, slot_type* slot)
+ ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
+ }
+
inline void dealloc() {
assert(capacity() != 0);
// Unpoison before returning the memory to the allocator.
@@ -2881,6 +3549,12 @@ class raw_hash_set {
inline void destructor_impl() {
if (capacity() == 0) return;
+ if (is_soo()) {
+ if (!empty()) {
+ ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot()));
+ }
+ return;
+ }
destroy_slots();
dealloc();
}
@@ -2890,10 +3564,16 @@ class raw_hash_set {
// This merely updates the pertinent control byte. This can be used in
// conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
+ assert(!is_soo());
EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
sizeof(slot_type));
}
+ size_t hash_of(slot_type* slot) const {
+ return PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(slot));
+ }
+
  // Resizes the table to the new capacity and moves all elements to their new
  // positions accordingly.
//
@@ -2902,143 +3582,165 @@ class raw_hash_set {
// HashSetResizeHelper::FindFirstNonFullAfterResize(
// common(), old_capacity, hash)
// can be called right after `resize`.
- ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
+ void resize(size_t new_capacity) {
+ raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
+ }
+
+ // As above, except that we also accept a pre-sampled, forced infoz for
+ // SOO tables, since they need to switch from SOO to heap in order to
+ // store the infoz.
+ void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
+ assert(forced_infoz.IsSampled());
+ raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
+ forced_infoz);
+ }
+
+ // Resizes set to the new capacity.
+ // It is a static function in order to use its pointer in GetPolicyFunctions.
+ ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
+ CommonFields& common, size_t new_capacity,
+ HashtablezInfoHandle forced_infoz) {
+ raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
assert(IsValidCapacity(new_capacity));
- HashSetResizeHelper resize_helper(common());
- auto* old_slots = slot_array();
- common().set_capacity(new_capacity);
+ assert(!set->fits_in_soo(new_capacity));
+ const bool was_soo = set->is_soo();
+ const bool had_soo_slot = was_soo && !set->empty();
+ const ctrl_t soo_slot_h2 =
+ had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
+ : ctrl_t::kEmpty;
+ HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
+ forced_infoz);
+ // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
+ // HashSetResizeHelper constructor because it can't transfer slots when
+ // transfer_uses_memcpy is false.
+ // TODO(b/289225379): try to handle more of the SOO cases inside
+ // InitializeSlots. See comment on cl/555990034 snapshot #63.
+ if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
+ resize_helper.old_heap_or_soo() = common.heap_or_soo();
+ } else {
+ set->transfer(set->to_slot(resize_helper.old_soo_data()),
+ set->soo_slot());
+ }
+ common.set_capacity(new_capacity);
    // Note that `InitializeSlots` performs a different number of
    // initialization steps depending on the values of `transfer_uses_memcpy`
    // and the capacities.
// Refer to the comment in `InitializeSlots` for more details.
const bool grow_single_group =
resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
PolicyTraits::transfer_uses_memcpy(),
- alignof(slot_type)>(
- common(), const_cast<std::remove_const_t<slot_type>*>(old_slots),
- CharAlloc(alloc_ref()));
+ SooEnabled(), alignof(slot_type)>(
+ common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
+ sizeof(value_type));
- if (resize_helper.old_capacity() == 0) {
+ // In the SooEnabled() case, capacity is never 0 so we don't check.
+ if (!SooEnabled() && resize_helper.old_capacity() == 0) {
// InitializeSlots did all the work including infoz().RecordRehash().
return;
}
+ assert(resize_helper.old_capacity() > 0);
+ // Nothing more to do in this case.
+ if (was_soo && !had_soo_slot) return;
+ slot_type* new_slots = set->slot_array();
if (grow_single_group) {
if (PolicyTraits::transfer_uses_memcpy()) {
// InitializeSlots did all the work.
return;
}
- // We want GrowSizeIntoSingleGroup to be called here in order to make
- // InitializeSlots not depend on PolicyTraits.
- resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
- old_slots);
+ if (was_soo) {
+ set->transfer(new_slots + resize_helper.SooSlotIndex(),
+ to_slot(resize_helper.old_soo_data()));
+ return;
+ } else {
+ // We want GrowSizeIntoSingleGroup to be called here in order to make
+ // InitializeSlots not depend on PolicyTraits.
+ resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
+ set->alloc_ref());
+ }
} else {
// InitializeSlots prepares control bytes to correspond to empty table.
- auto* new_slots = slot_array();
- size_t total_probe_length = 0;
- for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
- if (IsFull(resize_helper.old_ctrl()[i])) {
- size_t hash = PolicyTraits::apply(
- HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(common(), hash);
- size_t new_i = target.offset;
- total_probe_length += target.probe_length;
- SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
- transfer(new_slots + new_i, old_slots + i);
+ const auto insert_slot = [&](slot_type* slot) {
+ size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
+ PolicyTraits::element(slot));
+ auto target = find_first_non_full(common, hash);
+ SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
+ set->transfer(new_slots + target.offset, slot);
+ return target.probe_length;
+ };
+ if (was_soo) {
+ insert_slot(to_slot(resize_helper.old_soo_data()));
+ return;
+ } else {
+ auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
+ size_t total_probe_length = 0;
+ for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
+ if (IsFull(resize_helper.old_ctrl()[i])) {
+ total_probe_length += insert_slot(old_slots + i);
+ }
}
+ common.infoz().RecordRehash(total_probe_length);
}
- infoz().RecordRehash(total_probe_length);
}
- resize_helper.DeallocateOld<alignof(slot_type)>(
- CharAlloc(alloc_ref()), sizeof(slot_type),
- const_cast<std::remove_const_t<slot_type>*>(old_slots));
+ resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
+ sizeof(slot_type));
}
- // Prunes control bytes to remove as many tombstones as possible.
- //
- // See the comment on `rehash_and_grow_if_necessary()`.
- inline void drop_deletes_without_resize() {
- // Stack-allocate space for swapping elements.
- alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
- DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
- }
+ // Casting directly from e.g. char* to slot_type* can cause compilation errors
+  // on Objective-C. This function converts to void* first, avoiding the issue.
+ static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
- // Called whenever the table *might* need to conditionally grow.
- //
- // This function is an optimization opportunity to perform a rehash even when
- // growth is unnecessary, because vacating tombstones is beneficial for
- // performance in the long-run.
- void rehash_and_grow_if_necessary() {
- const size_t cap = capacity();
- if (cap > Group::kWidth &&
- // Do these calculations in 64-bit to avoid overflow.
- size() * uint64_t{32} <= cap * uint64_t{25}) {
- // Squash DELETED without growing if there is enough capacity.
- //
- // Rehash in place if the current size is <= 25/32 of capacity.
- // Rationale for such a high factor: 1) drop_deletes_without_resize() is
- // faster than resize, and 2) it takes quite a bit of work to add
- // tombstones. In the worst case, seems to take approximately 4
- // insert/erase pairs to create a single tombstone and so if we are
- // rehashing because of tombstones, we can afford to rehash-in-place as
- // long as we are reclaiming at least 1/8 the capacity without doing more
- // than 2X the work. (Where "work" is defined to be size() for rehashing
- // or rehashing in place, and 1 for an insert or erase.) But rehashing in
- // place is faster per operation than inserting or even doubling the size
- // of the table, so we actually afford to reclaim even less space from a
- // resize-in-place. The decision is to rehash in place if we can reclaim
- // at about 1/8th of the usable capacity (specifically 3/28 of the
- // capacity) which means that the total cost of rehashing will be a small
- // fraction of the total work.
- //
- // Here is output of an experiment using the BM_CacheInSteadyState
- // benchmark running the old case (where we rehash-in-place only if we can
- // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
- // if we can recover 3/32*capacity).
- //
- // Note that although in the worst-case number of rehashes jumped up from
- // 15 to 190, but the number of operations per second is almost the same.
- //
- // Abridged output of running BM_CacheInSteadyState benchmark from
- // raw_hash_set_benchmark. N is the number of insert/erase operations.
- //
- // | OLD (recover >= 7/16 | NEW (recover >= 3/32)
- // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
- // 448 | 145284 0.44 18 | 140118 0.44 19
- // 493 | 152546 0.24 11 | 151417 0.48 28
- // 538 | 151439 0.26 11 | 151152 0.53 38
- // 583 | 151765 0.28 11 | 150572 0.57 50
- // 628 | 150241 0.31 11 | 150853 0.61 66
- // 672 | 149602 0.33 12 | 150110 0.66 90
- // 717 | 149998 0.35 12 | 149531 0.70 129
- // 762 | 149836 0.37 13 | 148559 0.74 190
- // 807 | 149736 0.39 14 | 151107 0.39 14
- // 852 | 150204 0.42 15 | 151019 0.42 15
- drop_deletes_without_resize();
+ // Requires that lhs does not have a full SOO slot.
+ static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc,
+ CommonFields& lhs, CommonFields&& rhs) {
+ if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) {
+ lhs = std::move(rhs);
} else {
- // Otherwise grow the container.
- resize(NextCapacity(cap));
+ lhs.move_non_heap_or_soo_fields(rhs);
+ // TODO(b/303305702): add reentrancy guard.
+ PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
+ to_slot(rhs.soo_data()));
}
}
+  // Swaps common fields, making sure to avoid memcpy'ing a full SOO slot if
+  // we aren't allowed to do so.
+ void swap_common(raw_hash_set& that) {
+ using std::swap;
+ if (PolicyTraits::transfer_uses_memcpy()) {
+ swap(common(), that.common());
+ return;
+ }
+ CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>();
+ const bool that_is_full_soo = that.is_full_soo();
+ move_common(that_is_full_soo, that.alloc_ref(), tmp,
+ std::move(that.common()));
+ move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
+ move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
+ }
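+  // Editor's note: the fallback above is a three-move swap through `tmp` so
+  // that a full SOO slot is always moved via PolicyTraits::transfer rather
+  // than memcpy'd, which transfer_uses_memcpy() says would be invalid here.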
+
void maybe_increment_generation_or_rehash_on_move() {
- common().maybe_increment_generation_on_move();
+ if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) {
+ return;
+ }
+ common().increment_generation();
if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
resize(capacity());
}
}
- template<bool propagate_alloc>
+ template <bool propagate_alloc>
raw_hash_set& assign_impl(raw_hash_set&& that) {
// We don't bother checking for this/that aliasing. We just need to avoid
// breaking the invariants in that case.
destructor_impl();
- common() = std::move(that.common());
+ move_common(that.is_full_soo(), that.alloc_ref(), common(),
+ std::move(that.common()));
// TODO(b/296061262): move instead of copying hash/eq/alloc.
hash_ref() = that.hash_ref();
eq_ref() = that.eq_ref();
CopyAlloc(alloc_ref(), that.alloc_ref(),
std::integral_constant<bool, propagate_alloc>());
- that.common() = CommonFields{};
+ that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move();
return *this;
}
@@ -3051,8 +3753,8 @@ class raw_hash_set {
insert(std::move(PolicyTraits::element(it.slot())));
that.destroy(it.slot());
}
- that.dealloc();
- that.common() = CommonFields{};
+ if (!that.is_soo()) that.dealloc();
+ that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move();
return *this;
}
@@ -3078,12 +3780,30 @@ class raw_hash_set {
return move_elements_allocs_unequal(std::move(that));
}
- protected:
- // Attempts to find `key` in the table; if it isn't found, returns a slot that
- // the value can be inserted into, with the control byte already set to
- // `key`'s H2.
template <class K>
- std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
+ if (empty()) {
+ const HashtablezInfoHandle infoz = try_sample_soo();
+ if (infoz.IsSampled()) {
+ resize_with_soo_infoz(infoz);
+ } else {
+ common().set_full_soo();
+ return {soo_iterator(), true};
+ }
+ } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(soo_slot()))) {
+ return {soo_iterator(), false};
+ } else {
+ resize(NextCapacity(SooCapacity()));
+ }
+ const size_t index =
+ PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
+ return {iterator_at(index), true};
+ }
+
+ template <class K>
+ std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
+ assert(!is_soo());
prefetch_heap_block();
auto hash = hash_ref()(key);
auto seq = probe(common(), hash);
@@ -3094,65 +3814,92 @@ class raw_hash_set {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slot_array() + seq.offset(i)))))
- return {seq.offset(i), false};
+ return {iterator_at(seq.offset(i)), false};
+ }
+ auto mask_empty = g.MaskEmpty();
+ if (ABSL_PREDICT_TRUE(mask_empty)) {
+ size_t target = seq.offset(
+ GetInsertionOffset(mask_empty, capacity(), hash, control()));
+ return {iterator_at(PrepareInsertNonSoo(common(), hash,
+ FindInfo{target, seq.index()},
+ GetPolicyFunctions())),
+ true};
}
- if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
seq.next();
assert(seq.index() <= capacity() && "full table!");
}
- return {prepare_insert(hash), true};
}
- // Given the hash of a value not currently in the table, finds the next
- // viable slot index to insert it at.
- //
- // REQUIRES: At least one non-full slot available.
- size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- const bool rehash_for_bug_detection =
- common().should_rehash_for_bug_detection_on_insert();
- if (rehash_for_bug_detection) {
- // Move to a different heap allocation in order to detect bugs.
- const size_t cap = capacity();
- resize(growth_left() > 0 ? cap : NextCapacity(cap));
- }
- auto target = find_first_non_full(common(), hash);
- if (!rehash_for_bug_detection &&
- ABSL_PREDICT_FALSE(growth_left() == 0 &&
- !IsDeleted(control()[target.offset]))) {
- size_t old_capacity = capacity();
- rehash_and_grow_if_necessary();
- // NOTE: It is safe to use `FindFirstNonFullAfterResize`.
- // `FindFirstNonFullAfterResize` must be called right after resize.
- // `rehash_and_grow_if_necessary` may *not* call `resize`
- // and perform `drop_deletes_without_resize` instead. But this
- // could happen only on big tables.
- // For big tables `FindFirstNonFullAfterResize` will always
- // fallback to normal `find_first_non_full`, so it is safe to use it.
- target = HashSetResizeHelper::FindFirstNonFullAfterResize(
- common(), old_capacity, hash);
- }
- common().increment_size();
- set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
- SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
- common().maybe_increment_generation_on_insert();
- infoz().RecordInsert(hash, target.probe_length);
- return target.offset;
+ protected:
+ // Asserts that hash and equal functors provided by the user are consistent,
+  // meaning that `eq(k1, k2)` implies `hash(k1) == hash(k2)`.
+ template <class K>
+ void AssertHashEqConsistent(ABSL_ATTRIBUTE_UNUSED const K& key) {
+#ifndef NDEBUG
+ if (empty()) return;
+
+ const size_t hash_of_arg = hash_ref()(key);
+ const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
+ const value_type& element = PolicyTraits::element(slot);
+ const bool is_key_equal =
+ PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
+ if (!is_key_equal) return;
+
+ const size_t hash_of_slot =
+ PolicyTraits::apply(HashElement{hash_ref()}, element);
+ const bool is_hash_equal = hash_of_arg == hash_of_slot;
+ if (!is_hash_equal) {
+ // In this case, we're going to crash. Do a couple of other checks for
+ // idempotence issues. Recalculating hash/eq here is also convenient for
+ // debugging with gdb/lldb.
+ const size_t once_more_hash_arg = hash_ref()(key);
+ assert(hash_of_arg == once_more_hash_arg && "hash is not idempotent.");
+ const size_t once_more_hash_slot =
+ PolicyTraits::apply(HashElement{hash_ref()}, element);
+ assert(hash_of_slot == once_more_hash_slot &&
+ "hash is not idempotent.");
+ const bool once_more_eq =
+ PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
+ assert(is_key_equal == once_more_eq && "equality is not idempotent.");
+ }
+ assert((!is_key_equal || is_hash_equal) &&
+ "eq(k1, k2) must imply that hash(k1) == hash(k2). "
+ "hash/eq functors are inconsistent.");
+ };
+
+ if (is_soo()) {
+ assert_consistent(/*unused*/ nullptr, soo_slot());
+ return;
+ }
+ // We only do validation for small tables so that it's constant time.
+ if (capacity() > 16) return;
+ IterateOverFullSlots(common(), slot_array(), assert_consistent);
+#endif
+ }
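+  // Editor's note -- a hypothetical functor pair the assertion above would
+  // catch: equality compares case-insensitively while the hash does not, so
+  // eq(k1, k2) does not imply hash(k1) == hash(k2):
+  //   struct CaseInsensitiveEq {
+  //     bool operator()(absl::string_view a, absl::string_view b) const {
+  //       return absl::EqualsIgnoreCase(a, b);  // "Key" equals "key"...
+  //     }
+  //   };
+  //   // ...while absl::Hash<std::string>{}("Key") and
+  //   // absl::Hash<std::string>{}("key") generally differ.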
+
+  // Attempts to find `key` in the table; if it isn't found, returns an
+  // iterator at which the value can be inserted, with the control byte
+  // already set to `key`'s H2. Also returns a bool indicating whether an
+  // insertion can take place.
+ template <class K>
+ std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
+ AssertHashEqConsistent(key);
+ if (is_soo()) return find_or_prepare_insert_soo(key);
+ return find_or_prepare_insert_non_soo(key);
}
// Constructs the value in the space pointed by the iterator. This only works
// after an unsuccessful find_or_prepare_insert() and before any other
// modifications happen in the raw_hash_set.
//
- // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
- // k is the key decomposed from `forward<Args>(args)...`, and the bool
- // returned by find_or_prepare_insert(k) was true.
+ // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
+ // the key decomposed from `forward<Args>(args)...`, and the bool returned by
+ // find_or_prepare_insert(k) was true.
  // POSTCONDITION: *iter == value_type(forward<Args>(args)...).
template <class... Args>
- void emplace_at(size_t i, Args&&... args) {
- construct(slot_array() + i, std::forward<Args>(args)...);
+ void emplace_at(iterator iter, Args&&... args) {
+ construct(iter.slot(), std::forward<Args>(args)...);
- assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
- iterator_at(i) &&
+ assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
"constructed value does not match the lookup key");
}
@@ -3160,7 +3907,7 @@ class raw_hash_set {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return {control() + i, slot_array() + i, common().generation_ptr()};
+ return const_cast<raw_hash_set*>(this)->iterator_at(i);
}
reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
@@ -3178,13 +3925,25 @@ class raw_hash_set {
// side-effect.
//
// See `CapacityToGrowth()`.
- size_t growth_left() const { return common().growth_left(); }
- void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
+ size_t growth_left() const {
+ assert(!is_soo());
+ return common().growth_left();
+ }
+
+ GrowthInfo& growth_info() {
+ assert(!is_soo());
+ return common().growth_info();
+ }
+ GrowthInfo growth_info() const {
+ assert(!is_soo());
+ return common().growth_info();
+ }
// Prefetch the heap-allocated memory region to resolve potential TLB and
  // cache misses. This is intended to overlap with the computation of the
  // hash for a key.
void prefetch_heap_block() const {
+ assert(!is_soo());
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
__builtin_prefetch(control(), 0, 1);
#endif
@@ -3193,11 +3952,31 @@ class raw_hash_set {
CommonFields& common() { return settings_.template get<0>(); }
const CommonFields& common() const { return settings_.template get<0>(); }
- ctrl_t* control() const { return common().control(); }
+ ctrl_t* control() const {
+ assert(!is_soo());
+ return common().control();
+ }
slot_type* slot_array() const {
+ assert(!is_soo());
return static_cast<slot_type*>(common().slot_array());
}
- HashtablezInfoHandle infoz() { return common().infoz(); }
+ slot_type* soo_slot() {
+ assert(is_soo());
+ return static_cast<slot_type*>(common().soo_data());
+ }
+ const slot_type* soo_slot() const {
+ return const_cast<raw_hash_set*>(this)->soo_slot();
+ }
+ iterator soo_iterator() {
+ return {SooControl(), soo_slot(), common().generation_ptr()};
+ }
+ const_iterator soo_iterator() const {
+ return const_cast<raw_hash_set*>(this)->soo_iterator();
+ }
+ HashtablezInfoHandle infoz() {
+ assert(!is_soo());
+ return common().infoz();
+ }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -3208,12 +3987,9 @@ class raw_hash_set {
return settings_.template get<3>();
}
- // Make type-specific functions for this type's PolicyFunctions struct.
- static size_t hash_slot_fn(void* set, void* slot) {
- auto* h = static_cast<raw_hash_set*>(set);
- return PolicyTraits::apply(
- HashElement{h->hash_ref()},
- PolicyTraits::element(static_cast<slot_type*>(slot)));
+ static const void* get_hash_ref_fn(const CommonFields& common) {
+ auto* h = reinterpret_cast<const raw_hash_set*>(&common);
+ return &h->hash_ref();
}
static void transfer_slot_fn(void* set, void* dst, void* src) {
auto* h = static_cast<raw_hash_set*>(set);
@@ -3236,13 +4012,18 @@ class raw_hash_set {
static const PolicyFunctions& GetPolicyFunctions() {
static constexpr PolicyFunctions value = {
sizeof(slot_type),
- &raw_hash_set::hash_slot_fn,
+ // TODO(b/328722020): try to type erase
+ // for standard layout and alignof(Hash) <= alignof(CommonFields).
+ std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
+ : &raw_hash_set::get_hash_ref_fn,
+ PolicyTraits::template get_hash_slot_fn<hasher>(),
PolicyTraits::transfer_uses_memcpy()
? TransferRelocatable<sizeof(slot_type)>
: &raw_hash_set::transfer_slot_fn,
(std::is_same<SlotAlloc, std::allocator<slot_type>>::value
? &DeallocateStandard<alignof(slot_type)>
: &raw_hash_set::dealloc_fn),
+ &raw_hash_set::resize_impl,
};
return value;
}
@@ -3252,22 +4033,78 @@ class raw_hash_set {
// fields that occur after CommonFields.
absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
allocator_type>
- settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
+ settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
+ key_equal{}, allocator_type{}};
+};
+
+// Friend access for free functions in raw_hash_set.h.
+struct HashtableFreeFunctionsAccess {
+ template <class Predicate, typename Set>
+ static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
+ if (c->empty()) {
+ return 0;
+ }
+ if (c->is_soo()) {
+ auto it = c->soo_iterator();
+ if (!pred(*it)) {
+ assert(c->size() == 1 && "hash table was modified unexpectedly");
+ return 0;
+ }
+ c->destroy(it.slot());
+ c->common().set_empty_soo();
+ return 1;
+ }
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
+ size_t num_deleted = 0;
+ IterateOverFullSlots(
+ c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
+ if (pred(Set::PolicyTraits::element(slot))) {
+ c->destroy(slot);
+ EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
+ sizeof(*slot));
+ ++num_deleted;
+ }
+ });
+    // NOTE: IterateOverFullSlots allows removal of the current element, so
+    // we additionally verify the size here.
+ assert(original_size_for_assert - num_deleted == c->size() &&
+ "hash table was modified unexpectedly");
+ return num_deleted;
+ }
+
+ template <class Callback, typename Set>
+ static void ForEach(Callback& cb, Set* c) {
+ if (c->empty()) {
+ return;
+ }
+ if (c->is_soo()) {
+ cb(*c->soo_iterator());
+ return;
+ }
+ using ElementTypeWithConstness = decltype(*c->begin());
+ IterateOverFullSlots(
+ c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
+ ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
+ cb(element);
+ });
+ }
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
Predicate& pred, raw_hash_set<P, H, E, A>* c) {
- const auto initial_size = c->size();
- for (auto it = c->begin(), last = c->end(); it != last;) {
- if (pred(*it)) {
- c->erase(it++);
- } else {
- ++it;
- }
- }
- return initial_size - c->size();
+ return HashtableFreeFunctionsAccess::EraseIf(pred, c);
+}
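+// Editor's note -- a hedged usage sketch (hypothetical values); this is the
+// path reached via absl::erase_if on SwissTable containers:
+//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
+//   size_t n = absl::erase_if(s, [](int v) { return v % 2 == 0; });  // n == 2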
+
+// Calls `cb` for all elements in the container `c`.
+template <typename P, typename H, typename E, typename A, typename Callback>
+void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
+ return HashtableFreeFunctionsAccess::ForEach(cb, c);
+}
+template <typename P, typename H, typename E, typename A, typename Callback>
+void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
+ return HashtableFreeFunctionsAccess::ForEach(cb, c);
}
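// Editor's note -- ForEach is internal API; for a non-SOO table it is a
// hedged equivalent of:
//   for (auto& element : *c) cb(element);
// with the empty and SOO cases short-circuited before any probing.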
namespace hashtable_debug_internal {
@@ -3278,6 +4115,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t GetNumProbes(const Set& set,
const typename Set::key_type& key) {
+ if (set.is_soo()) return 0;
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
auto seq = probe(set.common(), hash);
@@ -3301,7 +4139,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity();
if (capacity == 0) return 0;
- size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
+ size_t m =
+ c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -3321,5 +4160,7 @@ ABSL_NAMESPACE_END
} // namespace absl
#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
+#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h b/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
index 59995ae3e2..0f6e3479e6 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h
@@ -33,14 +33,15 @@
#include <x86intrin.h>
#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
-#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__)
+#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__) && \
+ defined(_M_AMD64)
// MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ.
#include <intrin.h>
#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
-#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \
- defined(__ARM_FEATURE_CRC32) && defined(ABSL_INTERNAL_HAVE_ARM_NEON) && \
+#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \
+ defined(__ARM_FEATURE_CRC32) && defined(ABSL_INTERNAL_HAVE_ARM_NEON) && \
defined(__ARM_FEATURE_CRYPTO)
#include <arm_acle.h>
@@ -101,10 +102,11 @@ V128 V128_Xor(const V128 l, const V128 r);
// Produces an AND operation of |l| and |r|.
V128 V128_And(const V128 l, const V128 r);
-// Sets two 64 bit integers to one 128 bit vector. The order is reverse.
+// Sets the lower half of a 128-bit register to the given 64-bit value and
+// zeroes the upper half.
// dst[63:0] := |r|
-// dst[127:64] := |l|
-V128 V128_From2x64(const uint64_t l, const uint64_t r);
+// dst[127:64] := |0|
+V128 V128_From64WithZeroFill(const uint64_t r);
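+// Editor's note -- a hedged illustration of the new helper's contract:
+//   V128 v = V128_From64WithZeroFill(42);
+//   // V128_Low64(v) == 42 and V128_Extract64<1>(v) == 0, i.e. it behaves
+//   // like the old V128_From2x64(0, 42).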
// Shift |l| right by |imm| bytes while shifting in zeros.
template <int imm>
@@ -121,8 +123,8 @@ uint64_t V128_Extract64(const V128 l);
// Extracts the low 64 bits from V128.
int64_t V128_Low64(const V128 l);
-// Left-shifts packed 64-bit integers in l by r.
-V128 V128_ShiftLeft64(const V128 l, const V128 r);
+// Adds the packed 64-bit integers in |l| and |r|.
+V128 V128_Add64(const V128 l, const V128 r);
#endif
@@ -170,8 +172,8 @@ inline V128 V128_Xor(const V128 l, const V128 r) { return _mm_xor_si128(l, r); }
inline V128 V128_And(const V128 l, const V128 r) { return _mm_and_si128(l, r); }
-inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
- return _mm_set_epi64x(static_cast<int64_t>(l), static_cast<int64_t>(r));
+inline V128 V128_From64WithZeroFill(const uint64_t r) {
+ return _mm_set_epi64x(static_cast<int64_t>(0), static_cast<int64_t>(r));
}
template <int imm>
@@ -191,8 +193,8 @@ inline uint64_t V128_Extract64(const V128 l) {
inline int64_t V128_Low64(const V128 l) { return _mm_cvtsi128_si64(l); }
-inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
- return _mm_sll_epi64(l, r);
+inline V128 V128_Add64(const V128 l, const V128 r) {
+ return _mm_add_epi64(l, r);
}
#elif defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD)
@@ -261,10 +263,12 @@ inline V128 V128_Xor(const V128 l, const V128 r) { return veorq_u64(l, r); }
inline V128 V128_And(const V128 l, const V128 r) { return vandq_u64(l, r); }
-inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
- return vcombine_u64(vcreate_u64(r), vcreate_u64(l));
+inline V128 V128_From64WithZeroFill(const uint64_t r) {
+ constexpr uint64x2_t kZero = {0, 0};
+ return vsetq_lane_u64(r, kZero, 0);
}
+
template <int imm>
inline V128 V128_ShiftRight(const V128 l) {
return vreinterpretq_u64_s8(
@@ -285,9 +289,7 @@ inline int64_t V128_Low64(const V128 l) {
return vgetq_lane_s64(vreinterpretq_s64_u64(l), 0);
}
-inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
- return vshlq_u64(l, vreinterpretq_s64_u64(r));
-}
+inline V128 V128_Add64(const V128 l, const V128 r) { return vaddq_u64(l, r); }
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_cord_state.cc b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_cord_state.cc
index 28d04dc4ea..303a55593a 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_cord_state.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_cord_state.cc
@@ -17,6 +17,7 @@
#include <cassert>
#include "absl/base/config.h"
+#include "absl/base/no_destructor.h"
#include "absl/numeric/bits.h"
namespace absl {
@@ -24,14 +25,14 @@ ABSL_NAMESPACE_BEGIN
namespace crc_internal {
CrcCordState::RefcountedRep* CrcCordState::RefSharedEmptyRep() {
- static CrcCordState::RefcountedRep* empty = new CrcCordState::RefcountedRep;
+ static absl::NoDestructor<CrcCordState::RefcountedRep> empty;
assert(empty->count.load(std::memory_order_relaxed) >= 1);
assert(empty->rep.removed_prefix.length == 0);
assert(empty->rep.prefix_crc.empty());
- Ref(empty);
- return empty;
+ Ref(empty.get());
+ return empty.get();
}
CrcCordState::CrcCordState() : refcounted_rep_(new RefcountedRep) {}
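The hunk above replaces a deliberately leaked `new` with `absl::NoDestructor`,
which constructs the singleton in place on first use and never runs its
destructor, sidestepping static-destruction-order hazards at shutdown. A
minimal sketch of the pattern, using a hypothetical `Registry` payload type
that is not part of this patch:

#include "absl/base/no_destructor.h"

struct Registry {  // hypothetical payload, for illustration only
  int hits = 0;
};

Registry& GetRegistry() {
  // Constructed on first call, never destroyed; get() and operator-> are
  // also available, as RefSharedEmptyRep() uses above.
  static absl::NoDestructor<Registry> registry;
  return *registry;
}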
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc
index 07795504e3..e34249f07d 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc
@@ -12,12 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <cstdint>
+#include <cstring>
#include <memory>
#include "absl/base/config.h"
#include "absl/crc/crc32c.h"
#include "absl/crc/internal/crc_memcpy.h"
+#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_x86_arm_combined.cc b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
index 968e9ae359..38f61e9b1f 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
@@ -52,6 +52,7 @@
#include <cstring>
#include <memory>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
#include "absl/base/prefetch.h"
@@ -88,9 +89,11 @@ inline crc32c_t ShortCrcCopy(char* dst, const char* src, std::size_t length,
constexpr size_t kIntLoadsPerVec = sizeof(V128) / sizeof(uint64_t);
// Common function for copying the tails of multiple large regions.
+// Disable ubsan for benign unaligned access. See b/254108538.
template <size_t vec_regions, size_t int_regions>
-inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src,
- size_t region_size, size_t copy_rounds) {
+ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED inline void LargeTailCopy(
+ crc32c_t* crcs, char** dst, const char** src, size_t region_size,
+ size_t copy_rounds) {
std::array<V128, vec_regions> data;
std::array<uint64_t, kIntLoadsPerVec * int_regions> int_data;
@@ -127,8 +130,8 @@ inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src,
size_t data_index = i * kIntLoadsPerVec + j;
int_data[data_index] = *(usrc + j);
- crcs[region] = crc32c_t{static_cast<uint32_t>(CRC32_u64(
- static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
+ crcs[region] = crc32c_t{CRC32_u64(static_cast<uint32_t>(crcs[region]),
+ int_data[data_index])};
*(udst + j) = int_data[data_index];
}
@@ -155,8 +158,10 @@ class AcceleratedCrcMemcpyEngine : public CrcMemcpyEngine {
std::size_t length, crc32c_t initial_crc) const override;
};
+// Disable ubsan for benign unaligned access. See b/254108538.
template <size_t vec_regions, size_t int_regions>
-crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
+ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED crc32c_t
+AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
void* __restrict dst, const void* __restrict src, std::size_t length,
crc32c_t initial_crc) const {
constexpr std::size_t kRegions = vec_regions + int_regions;
@@ -196,7 +201,6 @@ crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
// Start work on the CRC: undo the XOR from the previous calculation or set up
// the initial value of the CRC.
- // initial_crc ^= kCrcDataXor;
initial_crc = crc32c_t{static_cast<uint32_t>(initial_crc) ^ kCrcDataXor};
// Do an initial alignment copy, so we can use aligned store instructions to
@@ -295,8 +299,8 @@ crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
// Load and CRC the data.
int_data[data_index] = *(usrc + i * kIntLoadsPerVec + k);
- crcs[region] = crc32c_t{static_cast<uint32_t>(CRC32_u64(
- static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
+ crcs[region] = crc32c_t{CRC32_u64(static_cast<uint32_t>(crcs[region]),
+ int_data[data_index])};
// Store the data.
*(udst + i * kIntLoadsPerVec + k) = int_data[data_index];
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc
index adc867f6b7..a56f1eb0e0 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <cstdint>
+#include <cstddef>
#include "absl/base/config.h"
#include "absl/crc/crc32c.h"
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
index 51eff4eddc..79dace34f4 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc
@@ -101,13 +101,17 @@ constexpr size_t kMediumCutoff = 2048;
namespace {
uint32_t multiply(uint32_t a, uint32_t b) {
- V128 shifts = V128_From2x64(0, 1);
- V128 power = V128_From2x64(0, a);
- V128 crc = V128_From2x64(0, b);
+ V128 power = V128_From64WithZeroFill(a);
+ V128 crc = V128_From64WithZeroFill(b);
V128 res = V128_PMulLow(power, crc);
- // Combine crc values
- res = V128_ShiftLeft64(res, shifts);
+ // Combine crc values.
+ //
+ // Adding res to itself is equivalent to multiplying by 2,
+ // or shifting left by 1. Addition is used as not all compilers
+ // are able to generate optimal code without this hint.
+ // https://godbolt.org/z/rr3fMnf39
+ res = V128_Add64(res, res);
return static_cast<uint32_t>(V128_Extract32<1>(res)) ^
CRC32_u32(0, static_cast<uint32_t>(V128_Low64(res)));
}
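The switch from V128_ShiftLeft64 to V128_Add64 in multiply() leans on the
identity that doubling a value equals shifting it left by one, including on
wraparound. A self-contained scalar check of that identity (illustrative only,
not part of the patch):

#include <cassert>
#include <cstdint>

void CheckDoubleEqualsShift(uint64_t x) {
  // x + x and x << 1 agree for every uint64_t value, which is why the
  // vector add of res with itself can stand in for a lane-wise shift.
  assert(static_cast<uint64_t>(x + x) == static_cast<uint64_t>(x << 1));
}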
@@ -444,11 +448,11 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
V128 magic = *(reinterpret_cast<const V128*>(kClmulConstants) + bs - 1);
- V128 tmp = V128_From2x64(0, l64);
+ V128 tmp = V128_From64WithZeroFill(l64);
V128 res1 = V128_PMulLow(tmp, magic);
- tmp = V128_From2x64(0, l641);
+ tmp = V128_From64WithZeroFill(l641);
V128 res2 = V128_PMul10(tmp, magic);
V128 x = V128_Xor(res1, res2);
diff --git a/contrib/restricted/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h b/contrib/restricted/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h
index b3d94badfc..7ae83bdcbd 100644
--- a/contrib/restricted/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h
+++ b/contrib/restricted/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h
@@ -19,19 +19,8 @@
#include <intrin.h>
#endif
-#ifdef __SSE__
-#include <xmmintrin.h>
-#endif
-
-#ifdef __SSE2__
-#include <emmintrin.h>
-#endif
-
-#ifdef __SSE3__
-#include <pmmintrin.h>
-#endif
-
-#ifdef __AVX__
+#if defined(__SSE__) || defined(__AVX__)
+// Pulls in both SSE and AVX intrinsics.
#include <immintrin.h>
#endif
@@ -44,6 +33,7 @@
#include <cstdint>
#include <cstring>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
@@ -57,7 +47,9 @@ namespace crc_internal {
// memcpy can save 1 DRAM load of the destination cacheline.
constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE;
-// If the objects overlap, the behavior is undefined.
+// If the objects overlap, the behavior is undefined. Uses regular memcpy
+// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
+// at compile time.
inline void *non_temporal_store_memcpy(void *__restrict dst,
const void *__restrict src, size_t len) {
#if defined(__SSE3__) || defined(__aarch64__) || \
@@ -119,10 +111,20 @@ inline void *non_temporal_store_memcpy(void *__restrict dst,
#endif // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__)
}
+// If the objects overlap, the behavior is undefined. Uses regular memcpy
+// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
+// at compile time.
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::target) && \
+ (defined(__x86_64__) || defined(__i386__))
+[[gnu::target("avx")]]
+#endif
inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
const void *__restrict src,
size_t len) {
-#ifdef __AVX__
+ // This function requires AVX. For clang and gcc we compile it with AVX even
+ // if the translation unit isn't built with AVX support. This works because we
+ // only select this implementation at runtime if the CPU supports AVX.
+#if defined(__SSE3__) || (defined(_MSC_VER) && defined(__AVX__))
uint8_t *d = reinterpret_cast<uint8_t *>(dst);
const uint8_t *s = reinterpret_cast<const uint8_t *>(src);
@@ -168,9 +170,8 @@ inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
}
return dst;
#else
- // Fallback to regular memcpy when AVX is not available.
return memcpy(dst, src, len);
-#endif // __AVX__
+#endif // __SSE3__ || (_MSC_VER && __AVX__)
}
} // namespace crc_internal
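The [[gnu::target("avx")]] attribute added above lets this single function be
compiled with AVX code generation even when the translation unit as a whole is
not, on the understanding that callers gate it behind a runtime CPU check. A
sketch of that compile-with-target, dispatch-at-runtime pattern, with
hypothetical CopyAvx/Copy names (the real selection logic lives in the CRC
engine and is not shown in this diff):

#include <cstddef>
#include <cstring>

#if defined(__x86_64__) || defined(__i386__)
[[gnu::target("avx")]]  // compile only this function's body with AVX enabled
void* CopyAvx(void* dst, const void* src, std::size_t len) {
  return std::memcpy(dst, src, len);  // stand-in for the AVX loop above
}

void* Copy(void* dst, const void* src, std::size_t len) {
  // Enter the AVX-compiled function only if the CPU actually supports AVX.
  if (__builtin_cpu_supports("avx")) return CopyAvx(dst, src, len);
  return std::memcpy(dst, src, len);  // portable fallback
}
#endif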
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.h b/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.h
index 5e034785f3..4117facb56 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/failure_signal_handler.h
@@ -33,7 +33,7 @@
// }
//
// Any program that raises a fatal signal (such as `SIGSEGV`, `SIGILL`,
-// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUG`, and `SIGTRAP`) will call the
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP`) will call the
// installed failure signal handler and provide debugging information to stderr.
//
// Note that you should *not* install the Abseil failure signal handler more
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/bounded_utf8_length_sequence.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/bounded_utf8_length_sequence.h
new file mode 100644
index 0000000000..188e06c4af
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/bounded_utf8_length_sequence.h
@@ -0,0 +1,126 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
+#define ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/numeric/bits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// A sequence of up to max_elements integers between 1 and 4 inclusive, whose
+// insertion operation computes the sum of all the elements before the insertion
+// point. This is useful in decoding Punycode, where one needs to know where in
+// a UTF-8 byte stream the n-th code point begins.
+//
+// BoundedUtf8LengthSequence is async-signal-safe and suitable for use in
+// symbolizing stack traces in a signal handler, provided max_elements is not
+// improvidently large. For inputs of lengths accepted by the Rust demangler,
+// up to a couple hundred code points, InsertAndReturnSumOfPredecessors should
+// run in a few dozen clock cycles, on par with the other arithmetic required
+// for Punycode decoding.
+template <uint32_t max_elements>
+class BoundedUtf8LengthSequence {
+ public:
+ // Constructs an empty sequence.
+ BoundedUtf8LengthSequence() = default;
+
+ // Inserts `utf8_length` at position `index`, shifting any existing elements at
+ // or beyond `index` one position to the right. If the sequence is already
+ // full, the rightmost element is discarded.
+ //
+ // Returns the sum of the elements at positions 0 to `index - 1` inclusive.
+ // If `index` is greater than the number of elements already inserted, the
+ // excess positions in the range count 1 apiece.
+ //
+ // REQUIRES: index < max_elements and 1 <= utf8_length <= 4.
+ uint32_t InsertAndReturnSumOfPredecessors(
+ uint32_t index, uint32_t utf8_length) {
+ // The caller shouldn't pass out-of-bounds inputs, but if it does happen,
+ // clamp the values and try to continue. If we're being called from a
+ // signal handler, the last thing we want to do is crash. Emitting
+ // malformed UTF-8 is a lesser evil.
+ if (index >= max_elements) index = max_elements - 1;
+ if (utf8_length == 0 || utf8_length > 4) utf8_length = 1;
+
+ const uint32_t word_index = index/32;
+ const uint32_t bit_index = 2 * (index % 32);
+ const uint64_t ones_bit = uint64_t{1} << bit_index;
+
+ // Compute the sum of predecessors.
+ // - Each value from 1 to 4 is represented by a bit field with value from
+ // 0 to 3, so the desired sum is index plus the sum of the
+ // representations actually stored.
+ // - For each bit field, a set low bit should contribute 1 to the sum, and
+ // a set high bit should contribute 2.
+ // - Another way to say the same thing is that each set bit contributes 1,
+ // and each set high bit contributes an additional 1.
+ // - So the sum we want is index + popcount(everything) + popcount(bits in
+ // odd positions).
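+ //
+ // Hand-worked example: with seminibbles 0b01 and 0b10 (elements 2 and 3)
+ // stored below an insertion at index 2, there are 2 set bits in total and
+ // 1 set bit in an odd position, so the sum is 2 + 2 + 1 = 5 = 2 + 3.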
+ const uint64_t odd_bits_mask = 0xaaaaaaaaaaaaaaaa;
+ const uint64_t lower_seminibbles_mask = ones_bit - 1;
+ const uint64_t higher_seminibbles_mask = ~lower_seminibbles_mask;
+ const uint64_t same_word_bits_below_insertion =
+ rep_[word_index] & lower_seminibbles_mask;
+ int full_popcount = absl::popcount(same_word_bits_below_insertion);
+ int odd_popcount =
+ absl::popcount(same_word_bits_below_insertion & odd_bits_mask);
+ for (uint32_t j = word_index; j > 0; --j) {
+ const uint64_t word_below_insertion = rep_[j - 1];
+ full_popcount += absl::popcount(word_below_insertion);
+ odd_popcount += absl::popcount(word_below_insertion & odd_bits_mask);
+ }
+ const uint32_t sum_of_predecessors =
+ index + static_cast<uint32_t>(full_popcount + odd_popcount);
+
+ // Now insert utf8_length's representation, shifting successors up one
+ // place.
+ for (uint32_t j = max_elements/32 - 1; j > word_index; --j) {
+ rep_[j] = (rep_[j] << 2) | (rep_[j - 1] >> 62);
+ }
+ rep_[word_index] =
+ (rep_[word_index] & lower_seminibbles_mask) |
+ (uint64_t{utf8_length - 1} << bit_index) |
+ ((rep_[word_index] & higher_seminibbles_mask) << 2);
+
+ return sum_of_predecessors;
+ }
+
+ private:
+ // If the (32 * i + j)-th element of the represented sequence has the value k
+ // (0 <= j < 32, 1 <= k <= 4), then bits 2 * j and 2 * j + 1 of rep_[i]
+ // contain the seminibble (k - 1).
+ //
+ // In particular, the zero-initialization of rep_ makes positions not holding
+ // any inserted element count as 1 in InsertAndReturnSumOfPredecessors.
+ //
+ // Example: rep_ = {0xb1, ... the rest zeroes ...} represents the sequence
+ // (2, 1, 4, 3, ... the rest 1's ...). Constructing the sequence of Unicode
+ // code points "Àa🂻中" = {U+00C0, U+0061, U+1F0BB, U+4E2D} (among many
+ // other examples) would yield this value of rep_.
+ static_assert(max_elements > 0 && max_elements % 32 == 0,
+ "max_elements must be a positive multiple of 32");
+ uint64_t rep_[max_elements/32] = {};
+};
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
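A hand-worked usage sketch of the container above; the return values follow
directly from the class comment (positions never written count as length 1):

#include <cassert>

#include "absl/debugging/internal/bounded_utf8_length_sequence.h"

void BoundedUtf8LengthSequenceExample() {
  absl::debugging_internal::BoundedUtf8LengthSequence<32> lengths;
  assert(lengths.InsertAndReturnSumOfPredecessors(0, 2) == 0);  // seq: 2,1,...
  assert(lengths.InsertAndReturnSumOfPredecessors(1, 3) == 2);  // seq: 2,3,1,...
  assert(lengths.InsertAndReturnSumOfPredecessors(1, 4) == 2);  // seq: 2,4,3,...
  assert(lengths.InsertAndReturnSumOfPredecessors(3, 1) == 9);  // 2+4+3 == 9
}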
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc
new file mode 100644
index 0000000000..43b46bf940
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc
@@ -0,0 +1,258 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/decode_rust_punycode.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+#include "absl/debugging/internal/bounded_utf8_length_sequence.h"
+#include "absl/debugging/internal/utf8_for_code_point.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+namespace {
+
+// Decoding Punycode requires repeated random-access insertion into a stream of
+// variable-length UTF-8 code-point encodings. We need this to be tolerably
+// fast (no N^2 slowdown for unfortunate inputs), and we can't allocate any data
+// structures on the heap (async-signal-safety).
+//
+// It is pragmatic to impose a moderately low limit on the identifier length and
+// bail out if we ever hit it. Then BoundedUtf8LengthSequence efficiently
+// determines where to insert the next code point, and memmove efficiently makes
+// room for it.
+//
+// The chosen limit is a round number several times larger than identifiers
+// expected in practice, yet still small enough that a memmove of this many
+// UTF-8 characters is not much more expensive than the division and modulus
+// operations that Punycode decoding requires.
+constexpr uint32_t kMaxChars = 256;
+
+// Constants from RFC 3492 section 5.
+constexpr uint32_t kBase = 36, kTMin = 1, kTMax = 26, kSkew = 38, kDamp = 700;
+
+constexpr uint32_t kMaxCodePoint = 0x10ffff;
+
+// Overflow threshold in DecodeRustPunycode's inner loop; see comments there.
+constexpr uint32_t kMaxI = 1 << 30;
+
+// If punycode_begin .. punycode_end begins with a prefix matching the regular
+// expression [0-9a-zA-Z_]+_, removes that prefix, copies all but the final
+// underscore into out_begin .. out_end, sets num_ascii_chars to the number of
+// bytes copied, and returns true. (A prefix of this sort represents the
+// nonempty subsequence of ASCII characters in the corresponding plaintext.)
+//
+// If punycode_begin .. punycode_end does not contain an underscore, sets
+// num_ascii_chars to zero and returns true. (The encoding of a plaintext
+// without any ASCII characters does not carry such a prefix.)
+//
+// Returns false and zeroes num_ascii_chars on failure (either parse error or
+// not enough space in the output buffer).
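+//
+// Example: given "Mnchen_3ya", the whole prefix "Mnchen_" is consumed,
+// "Mnchen" is copied to the output, and "3ya" remains for delta decoding.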
+bool ConsumeOptionalAsciiPrefix(const char*& punycode_begin,
+ const char* const punycode_end,
+ char* const out_begin,
+ char* const out_end,
+ uint32_t& num_ascii_chars) {
+ num_ascii_chars = 0;
+
+ // Remember the last underscore if any. Also use the same string scan to
+ // reject any ASCII bytes that do not belong in an identifier, including NUL,
+ // as well as non-ASCII bytes, which should have been delta-encoded instead.
+ int last_underscore = -1;
+ for (int i = 0; i < punycode_end - punycode_begin; ++i) {
+ const char c = punycode_begin[i];
+ if (c == '_') {
+ last_underscore = i;
+ continue;
+ }
+ // We write out the meaning of absl::ascii_isalnum rather than call that
+ // function because its documentation does not promise it will remain
+ // async-signal-safe under future development.
+ if ('a' <= c && c <= 'z') continue;
+ if ('A' <= c && c <= 'Z') continue;
+ if ('0' <= c && c <= '9') continue;
+ return false;
+ }
+
+ // If there was no underscore, that means there were no ASCII characters in
+ // the plaintext, so there is no prefix to consume. Our work is done.
+ if (last_underscore < 0) return true;
+
+ // Otherwise there will be an underscore delimiter somewhere. It can't be
+ // initial because then there would be no ASCII characters to its left, and no
+ // delimiter would have been added in that case.
+ if (last_underscore == 0) return false;
+
+ // Any other position is reasonable. Make sure there's room in the buffer.
+ if (last_underscore + 1 > out_end - out_begin) return false;
+
+ // Consume and write out the ASCII characters.
+ num_ascii_chars = static_cast<uint32_t>(last_underscore);
+ std::memcpy(out_begin, punycode_begin, num_ascii_chars);
+ out_begin[num_ascii_chars] = '\0';
+ punycode_begin += num_ascii_chars + 1;
+ return true;
+}
+
+// Returns the value of `c` as a base-36 digit according to RFC 3492 section 5,
+// or -1 if `c` is not such a digit.
+int DigitValue(char c) {
+ if ('0' <= c && c <= '9') return c - '0' + 26;
+ if ('a' <= c && c <= 'z') return c - 'a';
+ if ('A' <= c && c <= 'Z') return c - 'A';
+ return -1;
+}
+
+// Consumes the next delta encoding from punycode_begin .. punycode_end,
+// updating i accordingly. Returns true on success. Returns false on parse
+// failure or arithmetic overflow.
+bool ScanNextDelta(const char*& punycode_begin, const char* const punycode_end,
+ uint32_t bias, uint32_t& i) {
+ uint64_t w = 1; // 64 bits to prevent overflow in w *= kBase - t
+
+ // "for k = base to infinity in steps of base do begin ... end" in RFC 3492
+ // section 6.2. Each loop iteration scans one digit of the delta.
+ for (uint32_t k = kBase; punycode_begin != punycode_end; k += kBase) {
+ const int digit_value = DigitValue(*punycode_begin++);
+ if (digit_value < 0) return false;
+
+ // Compute this in 64-bit arithmetic so we can check for overflow afterward.
+ const uint64_t new_i = i + static_cast<uint64_t>(digit_value) * w;
+
+ // Valid deltas are bounded by (#chars already emitted) * kMaxCodePoint, but
+ // invalid input could encode an arbitrarily large delta. Nip that in the
+ // bud here.
+ static_assert(
+ kMaxI >= kMaxChars * kMaxCodePoint,
+ "kMaxI is too small to prevent spurious failures on good input");
+ if (new_i > kMaxI) return false;
+
+ static_assert(
+ kMaxI < (uint64_t{1} << 32),
+ "Make kMaxI smaller or i 64 bits wide to prevent silent wraparound");
+ i = static_cast<uint32_t>(new_i);
+
+ // Compute the threshold that determines whether this is the last digit and
+ // (if not) what the next digit's place value will be. This logic from RFC
+ // 3492 section 6.2 is explained in section 3.3.
+ uint32_t t;
+ if (k <= bias + kTMin) {
+ t = kTMin;
+ } else if (k >= bias + kTMax) {
+ t = kTMax;
+ } else {
+ t = k - bias;
+ }
+ if (static_cast<uint32_t>(digit_value) < t) return true;
+
+ // If this gets too large, the range check on new_i in the next iteration
+ // will catch it. We know this multiplication will not overflow because w
+ // is 64 bits wide.
+ w *= kBase - t;
+ }
+ return false;
+}
+
+} // namespace
+
+absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options) {
+ const char* punycode_begin = options.punycode_begin;
+ const char* const punycode_end = options.punycode_end;
+ char* const out_begin = options.out_begin;
+ char* const out_end = options.out_end;
+
+ // Write a NUL terminator first. Later memcpy calls will keep bumping it
+ // along to its new right place.
+ const size_t out_size = static_cast<size_t>(out_end - out_begin);
+ if (out_size == 0) return nullptr;
+ *out_begin = '\0';
+
+ // RFC 3492 section 6.2 begins here. We retain the names of integer variables
+ // appearing in that text.
+ uint32_t n = 128, i = 0, bias = 72, num_chars = 0;
+
+ // If there are any ASCII characters, consume them and their trailing
+ // underscore delimiter.
+ if (!ConsumeOptionalAsciiPrefix(punycode_begin, punycode_end,
+ out_begin, out_end, num_chars)) {
+ return nullptr;
+ }
+ uint32_t total_utf8_bytes = num_chars;
+
+ BoundedUtf8LengthSequence<kMaxChars> utf8_lengths;
+
+ // "while the input is not exhausted do begin ... end"
+ while (punycode_begin != punycode_end) {
+ if (num_chars >= kMaxChars) return nullptr;
+
+ const uint32_t old_i = i;
+
+ if (!ScanNextDelta(punycode_begin, punycode_end, bias, i)) return nullptr;
+
+ // Update bias as in RFC 3492 section 6.1. (We have inlined adapt.)
+ uint32_t delta = i - old_i;
+ delta /= (old_i == 0 ? kDamp : 2);
+ delta += delta/(num_chars + 1);
+ bias = 0;
+ while (delta > ((kBase - kTMin) * kTMax)/2) {
+ delta /= kBase - kTMin;
+ bias += kBase;
+ }
+ bias += ((kBase - kTMin + 1) * delta)/(delta + kSkew);
+
+ // Back in section 6.2, compute the new code point and insertion index.
+ static_assert(
+ kMaxI + kMaxCodePoint < (uint64_t{1} << 32),
+ "Make kMaxI smaller or n 64 bits wide to prevent silent wraparound");
+ n += i/(num_chars + 1);
+ i %= num_chars + 1;
+
+ // To actually insert, we need to convert the code point n to UTF-8 and the
+ // character index i to an index into the byte stream emitted so far. First
+ // prepare the UTF-8 encoding for n, rejecting surrogates, overlarge values,
+ // and anything that won't fit into the remaining output storage.
+ Utf8ForCodePoint utf8_for_code_point(n);
+ if (!utf8_for_code_point.ok()) return nullptr;
+ if (total_utf8_bytes + utf8_for_code_point.length + 1 > out_size) {
+ return nullptr;
+ }
+
+ // Now insert the new character into both our length map and the output.
+ uint32_t n_index =
+ utf8_lengths.InsertAndReturnSumOfPredecessors(
+ i, utf8_for_code_point.length);
+ std::memmove(
+ out_begin + n_index + utf8_for_code_point.length, out_begin + n_index,
+ total_utf8_bytes + 1 - n_index);
+ std::memcpy(out_begin + n_index, utf8_for_code_point.bytes,
+ utf8_for_code_point.length);
+ total_utf8_bytes += utf8_for_code_point.length;
+ ++num_chars;
+
+ // Finally, advance to the next state before continuing.
+ ++i;
+ }
+
+ return out_begin + total_utf8_bytes;
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h
new file mode 100644
index 0000000000..0ae53ff31e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h
@@ -0,0 +1,55 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
+#define ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
+
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+struct DecodeRustPunycodeOptions {
+ const char* punycode_begin;
+ const char* punycode_end;
+ char* out_begin;
+ char* out_end;
+};
+
+// Given Rust Punycode in `punycode_begin .. punycode_end`, writes the
+// corresponding UTF-8 plaintext into `out_begin .. out_end`, followed by a NUL
+// character, and returns a pointer to that final NUL on success. On failure
+// returns a null pointer, and the contents of `out_begin .. out_end` are
+// unspecified.
+//
+// Failure occurs in precisely these cases:
+// - Any input byte does not match [0-9a-zA-Z_].
+// - The first input byte is an underscore, but no other underscore appears in
+// the input.
+// - The delta sequence does not represent a valid sequence of code-point
+// insertions.
+// - The plaintext would contain more than 256 code points.
+//
+// DecodeRustPunycode is async-signal-safe with bounded runtime and a small
+// stack footprint, making it suitable for use in demangling Rust symbol names
+// from a signal handler.
+absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
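A usage sketch for the API above. The input assumes, without verification
here, that the classic RFC 3492 Punycode of "München" carries over to the Rust
flavor with '_' substituted for '-' as the delimiter:

#include <cstring>

#include "absl/debugging/internal/decode_rust_punycode.h"

bool DecodeExample() {
  const char punycode[] = "Mnchen_3ya";  // assumed encoding of "München"
  char out[64];
  absl::debugging_internal::DecodeRustPunycodeOptions options;
  options.punycode_begin = punycode;
  options.punycode_end = punycode + std::strlen(punycode);
  options.out_begin = out;
  options.out_end = out + sizeof(out);
  // Returns a pointer to the written NUL terminator on success.
  char* terminator = absl::debugging_internal::DecodeRustPunycode(options);
  // The comparison assumes a UTF-8 source encoding for the literal.
  return terminator != nullptr && std::strcmp(out, "München") == 0;
}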
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
index 381a2b504b..caac76367f 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.cc
@@ -14,18 +14,19 @@
// For reference check out:
// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
-//
-// Note that we only have partial C++11 support yet.
#include "absl/debugging/internal/demangle.h"
+#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
+#include <cstring>
#include <limits>
#include <string>
#include "absl/base/config.h"
+#include "absl/debugging/internal/demangle_rust.h"
#if ABSL_INTERNAL_HAS_CXA_DEMANGLE
#include <cxxabi.h>
@@ -44,14 +45,16 @@ typedef struct {
// List of operators from Itanium C++ ABI.
static const AbbrevPair kOperatorList[] = {
- // New has special syntax (not currently supported).
+ // New has special syntax.
{"nw", "new", 0},
{"na", "new[]", 0},
- // Works except that the 'gs' prefix is not supported.
+ // Special-cased elsewhere to support the optional gs prefix.
{"dl", "delete", 1},
{"da", "delete[]", 1},
+ {"aw", "co_await", 1},
+
{"ps", "+", 1}, // "positive"
{"ng", "-", 1}, // "negative"
{"ad", "&", 1}, // "address-of"
@@ -79,6 +82,7 @@ static const AbbrevPair kOperatorList[] = {
{"rs", ">>", 2},
{"lS", "<<=", 2},
{"rS", ">>=", 2},
+ {"ss", "<=>", 2},
{"eq", "==", 2},
{"ne", "!=", 2},
{"lt", "<", 2},
@@ -98,6 +102,7 @@ static const AbbrevPair kOperatorList[] = {
{"qu", "?", 3},
{"st", "sizeof", 0}, // Special syntax
{"sz", "sizeof", 1}, // Not a real operator name, but used in expressions.
+ {"sZ", "sizeof...", 0}, // Special syntax
{nullptr, nullptr, 0},
};
@@ -187,9 +192,50 @@ typedef struct {
int recursion_depth; // For stack exhaustion prevention.
int steps; // Cap how much work we'll do, regardless of depth.
ParseState parse_state; // Backtrackable state copied for most frames.
+
+ // Conditionally compiled support for marking the position of the first
+ // construct Demangle couldn't parse. This preprocessor symbol is intended
+ // for use by Abseil demangler maintainers only; its behavior is not part of
+ // Abseil's public interface.
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+ int high_water_mark; // Input position where parsing failed.
+ bool too_complex; // True if any guard.IsTooComplex() call returned true.
+#endif
} State;
namespace {
+
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+void UpdateHighWaterMark(State *state) {
+ if (state->high_water_mark < state->parse_state.mangled_idx) {
+ state->high_water_mark = state->parse_state.mangled_idx;
+ }
+}
+
+void ReportHighWaterMark(State *state) {
+ // Write out the mangled name with the trouble point marked, provided that the
+ // output buffer is large enough and the mangled name did not hit a complexity
+ // limit (in which case the high water mark wouldn't point out an unparsable
+ // construct, only the point where a budget ran out).
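+ //
+ // Example: for input "_Zfoo" with high_water_mark == 2, this writes
+ // "_Z--!--foo" to the output buffer.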
+ const size_t input_length = std::strlen(state->mangled_begin);
+ if (input_length + 6 > static_cast<size_t>(state->out_end_idx) ||
+ state->too_complex) {
+ if (state->out_end_idx > 0) state->out[0] = '\0';
+ return;
+ }
+ const size_t high_water_mark = static_cast<size_t>(state->high_water_mark);
+ std::memcpy(state->out, state->mangled_begin, high_water_mark);
+ std::memcpy(state->out + high_water_mark, "--!--", 5);
+ std::memcpy(state->out + high_water_mark + 5,
+ state->mangled_begin + high_water_mark,
+ input_length - high_water_mark);
+ state->out[input_length + 5] = '\0';
+}
+#else
+void UpdateHighWaterMark(State *) {}
+void ReportHighWaterMark(State *) {}
+#endif
+
// Prevent deep recursion / stack exhaustion.
// Also prevent unbounded handling of complex inputs.
class ComplexityGuard {
@@ -201,7 +247,7 @@ class ComplexityGuard {
~ComplexityGuard() { --state_->recursion_depth; }
// 256 levels of recursion seems like a reasonable upper limit on depth.
- // 128 is not enough to demagle synthetic tests from demangle_unittest.txt:
+ // 128 is not enough to demangle synthetic tests from demangle_unittest.txt:
// "_ZaaZZZZ..." and "_ZaaZcvZcvZ..."
static constexpr int kRecursionDepthLimit = 256;
@@ -222,8 +268,14 @@ class ComplexityGuard {
static constexpr int kParseStepsLimit = 1 << 17;
bool IsTooComplex() const {
- return state_->recursion_depth > kRecursionDepthLimit ||
- state_->steps > kParseStepsLimit;
+ if (state_->recursion_depth > kRecursionDepthLimit ||
+ state_->steps > kParseStepsLimit) {
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+ state_->too_complex = true;
+#endif
+ return true;
+ }
+ return false;
}
private:
@@ -270,6 +322,10 @@ static void InitState(State* state,
state->out_end_idx = static_cast<int>(out_size);
state->recursion_depth = 0;
state->steps = 0;
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+ state->high_water_mark = 0;
+ state->too_complex = false;
+#endif
state->parse_state.mangled_idx = 0;
state->parse_state.out_cur_idx = 0;
@@ -291,13 +347,14 @@ static bool ParseOneCharToken(State *state, const char one_char_token) {
if (guard.IsTooComplex()) return false;
if (RemainingInput(state)[0] == one_char_token) {
++state->parse_state.mangled_idx;
+ UpdateHighWaterMark(state);
return true;
}
return false;
}
-// Returns true and advances "mangled_cur" if we find "two_char_token"
-// at "mangled_cur" position. It is assumed that "two_char_token" does
+// Returns true and advances "mangled_idx" if we find "two_char_token"
+// at "mangled_idx" position. It is assumed that "two_char_token" does
// not contain '\0'.
static bool ParseTwoCharToken(State *state, const char *two_char_token) {
ComplexityGuard guard(state);
@@ -305,11 +362,45 @@ static bool ParseTwoCharToken(State *state, const char *two_char_token) {
if (RemainingInput(state)[0] == two_char_token[0] &&
RemainingInput(state)[1] == two_char_token[1]) {
state->parse_state.mangled_idx += 2;
+ UpdateHighWaterMark(state);
return true;
}
return false;
}
+// Returns true and advances "mangled_idx" if we find "three_char_token"
+// at "mangled_idx" position. It is assumed that "three_char_token" does
+// not contain '\0'.
+static bool ParseThreeCharToken(State *state, const char *three_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == three_char_token[0] &&
+ RemainingInput(state)[1] == three_char_token[1] &&
+ RemainingInput(state)[2] == three_char_token[2]) {
+ state->parse_state.mangled_idx += 3;
+ UpdateHighWaterMark(state);
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_idx" if we find a copy of the
+// NUL-terminated string "long_token" at "mangled_idx" position.
+static bool ParseLongToken(State *state, const char *long_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ int i = 0;
+ for (; long_token[i] != '\0'; ++i) {
+ // Note that we cannot run off the end of the NUL-terminated input here.
+ // Inside the loop body, long_token[i] is known to be different from NUL.
+ // So if we read the NUL on the end of the input here, we return at once.
+ if (RemainingInput(state)[i] != long_token[i]) return false;
+ }
+ state->parse_state.mangled_idx += i;
+ UpdateHighWaterMark(state);
+ return true;
+}
+
// Returns true and advances "mangled_cur" if we find any character in
// "char_class" at "mangled_cur" position.
static bool ParseCharClass(State *state, const char *char_class) {
@@ -322,6 +413,7 @@ static bool ParseCharClass(State *state, const char *char_class) {
for (; *p != '\0'; ++p) {
if (RemainingInput(state)[0] == *p) {
++state->parse_state.mangled_idx;
+ UpdateHighWaterMark(state);
return true;
}
}
@@ -554,6 +646,7 @@ static bool ParseFloatNumber(State *state);
static bool ParseSeqId(State *state);
static bool ParseIdentifier(State *state, size_t length);
static bool ParseOperatorName(State *state, int *arity);
+static bool ParseConversionOperatorType(State *state);
static bool ParseSpecialName(State *state);
static bool ParseCallOffset(State *state);
static bool ParseNVOffset(State *state);
@@ -563,21 +656,33 @@ static bool ParseCtorDtorName(State *state);
static bool ParseDecltype(State *state);
static bool ParseType(State *state);
static bool ParseCVQualifiers(State *state);
+static bool ParseExtendedQualifier(State *state);
static bool ParseBuiltinType(State *state);
+static bool ParseVendorExtendedType(State *state);
static bool ParseFunctionType(State *state);
static bool ParseBareFunctionType(State *state);
+static bool ParseOverloadAttribute(State *state);
static bool ParseClassEnumType(State *state);
static bool ParseArrayType(State *state);
static bool ParsePointerToMemberType(State *state);
static bool ParseTemplateParam(State *state);
+static bool ParseTemplateParamDecl(State *state);
static bool ParseTemplateTemplateParam(State *state);
static bool ParseTemplateArgs(State *state);
static bool ParseTemplateArg(State *state);
static bool ParseBaseUnresolvedName(State *state);
static bool ParseUnresolvedName(State *state);
+static bool ParseUnresolvedQualifierLevel(State *state);
+static bool ParseUnionSelector(State* state);
+static bool ParseFunctionParam(State* state);
+static bool ParseBracedExpression(State *state);
static bool ParseExpression(State *state);
+static bool ParseInitializer(State *state);
static bool ParseExprPrimary(State *state);
-static bool ParseExprCastValue(State *state);
+static bool ParseExprCastValueAndTrailingE(State *state);
+static bool ParseQRequiresClauseExpr(State *state);
+static bool ParseRequirement(State *state);
+static bool ParseTypeConstraint(State *state);
static bool ParseLocalName(State *state);
static bool ParseLocalNameSuffix(State *state);
static bool ParseDiscriminator(State *state);
@@ -622,22 +727,34 @@ static bool ParseMangledName(State *state) {
}
// <encoding> ::= <(function) name> <bare-function-type>
+// [`Q` <requires-clause expr>]
// ::= <(data) name>
// ::= <special-name>
+//
+// NOTE: Based on http://shortn/_Hoq9qG83rx
static bool ParseEncoding(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
- // Implementing the first two productions together as <name>
- // [<bare-function-type>] avoids exponential blowup of backtracking.
+ // Since the first two productions both start with <name>, attempt
+ // to parse it only once to avoid exponential blowup of backtracking.
//
- // Since Optional(...) can't fail, there's no need to copy the state for
- // backtracking.
- if (ParseName(state) && Optional(ParseBareFunctionType(state))) {
+ // We're careful about exponential blowup because <encoding> recursively
+ // appears in other productions downstream of its first two productions,
+ // which means that every call to `ParseName` would possibly indirectly
+ // result in two calls to `ParseName` etc.
+ if (ParseName(state)) {
+ if (!ParseBareFunctionType(state)) {
+ return true; // <(data) name>
+ }
+
+ // Parsed: <(function) name> <bare-function-type>
+ // Pending: [`Q` <requires-clause expr>]
+ ParseQRequiresClauseExpr(state); // restores state on failure
return true;
}
if (ParseSpecialName(state)) {
- return true;
+ return true; // <special-name>
}
return false;
}
@@ -723,19 +840,26 @@ static bool ParseNestedName(State *state) {
// <prefix> ::= <prefix> <unqualified-name>
// ::= <template-prefix> <template-args>
// ::= <template-param>
+// ::= <decltype>
// ::= <substitution>
// ::= # empty
// <template-prefix> ::= <prefix> <(template) unqualified-name>
// ::= <template-param>
// ::= <substitution>
+// ::= <vendor-extended-type>
static bool ParsePrefix(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
bool has_something = false;
while (true) {
MaybeAppendSeparator(state);
- if (ParseTemplateParam(state) ||
+ if (ParseTemplateParam(state) || ParseDecltype(state) ||
ParseSubstitution(state, /*accept_std=*/true) ||
+ // Although the official grammar does not mention it, nested-names
+ // shaped like Nu14__some_builtinIiE6memberE occur in practice, and it
+ // is not clear what else a compiler is supposed to do when a
+ // vendor-extended type has named members.
+ ParseVendorExtendedType(state) ||
ParseUnscopedName(state) ||
(ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
has_something = true;
@@ -757,8 +881,14 @@ static bool ParsePrefix(State *state) {
// ::= <source-name> [<abi-tags>]
// ::= <local-source-name> [<abi-tags>]
// ::= <unnamed-type-name> [<abi-tags>]
+// ::= DC <source-name>+ E # C++17 structured binding
+// ::= F <source-name> # C++20 constrained friend
+// ::= F <operator-name> # C++20 constrained friend
//
// <local-source-name> is a GCC extension; see below.
+//
+// For the F notation for constrained friends, see
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/24#issuecomment-1491130332.
static bool ParseUnqualifiedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
@@ -767,6 +897,23 @@ static bool ParseUnqualifiedName(State *state) {
ParseUnnamedTypeName(state)) {
return ParseAbiTags(state);
}
+
+ // DC <source-name>+ E
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "DC") && OneOrMore(ParseSourceName, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // F <source-name>
+ // F <operator-name>
+ if (ParseOneCharToken(state, 'F') && MaybeAppend(state, "friend ") &&
+ (ParseSourceName(state) || ParseOperatorName(state, nullptr))) {
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
@@ -824,7 +971,11 @@ static bool ParseLocalSourceName(State *state) {
// <unnamed-type-name> ::= Ut [<(nonnegative) number>] _
// ::= <closure-type-name>
// <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
-// <lambda-sig> ::= <(parameter) type>+
+// <lambda-sig> ::= <template-param-decl>* <(parameter) type>+
+//
+// For <template-param-decl>* in <lambda-sig> see:
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/31
static bool ParseUnnamedTypeName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
@@ -847,6 +998,7 @@ static bool ParseUnnamedTypeName(State *state) {
// Closure type.
which = -1;
if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
+ ZeroOrMore(ParseTemplateParamDecl, state) &&
OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
@@ -888,6 +1040,7 @@ static bool ParseNumber(State *state, int *number_out) {
}
if (p != RemainingInput(state)) { // Conversion succeeded.
state->parse_state.mangled_idx += p - RemainingInput(state);
+ UpdateHighWaterMark(state);
if (number_out != nullptr) {
// Note: possibly truncate "number".
*number_out = static_cast<int>(number);
@@ -910,6 +1063,7 @@ static bool ParseFloatNumber(State *state) {
}
if (p != RemainingInput(state)) { // Conversion succeeded.
state->parse_state.mangled_idx += p - RemainingInput(state);
+ UpdateHighWaterMark(state);
return true;
}
return false;
@@ -928,6 +1082,7 @@ static bool ParseSeqId(State *state) {
}
if (p != RemainingInput(state)) { // Conversion succeeded.
state->parse_state.mangled_idx += p - RemainingInput(state);
+ UpdateHighWaterMark(state);
return true;
}
return false;
@@ -946,11 +1101,13 @@ static bool ParseIdentifier(State *state, size_t length) {
MaybeAppendWithLength(state, RemainingInput(state), length);
}
state->parse_state.mangled_idx += length;
+ UpdateHighWaterMark(state);
return true;
}
// <operator-name> ::= nw, and other two letters cases
// ::= cv <type> # (cast)
+// ::= li <source-name> # C++11 user-defined literal
// ::= v <digit> <source-name> # vendor extended operator
static bool ParseOperatorName(State *state, int *arity) {
ComplexityGuard guard(state);
@@ -961,7 +1118,7 @@ static bool ParseOperatorName(State *state, int *arity) {
// First check with "cv" (cast) case.
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
- EnterNestedName(state) && ParseType(state) &&
+ EnterNestedName(state) && ParseConversionOperatorType(state) &&
LeaveNestedName(state, copy.nest_level)) {
if (arity != nullptr) {
*arity = 1;
@@ -970,6 +1127,13 @@ static bool ParseOperatorName(State *state, int *arity) {
}
state->parse_state = copy;
+ // Then user-defined literals.
+ if (ParseTwoCharToken(state, "li") && MaybeAppend(state, "operator\"\" ") &&
+ ParseSourceName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
// Then vendor extended operators.
if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
ParseSourceName(state)) {
@@ -997,36 +1161,120 @@ static bool ParseOperatorName(State *state, int *arity) {
}
MaybeAppend(state, p->real_name);
state->parse_state.mangled_idx += 2;
+ UpdateHighWaterMark(state);
return true;
}
}
return false;
}
+// <operator-name> ::= cv <type> # (cast)
+//
+// The name of a conversion operator is the one place where cv-qualifiers, *, &,
+// and other simple type combinators are expected to appear in our stripped-down
+// demangling (elsewhere they appear in function signatures or template
+// arguments, which we omit from the output). We make reasonable efforts to
+// render simple cases accurately.
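+//
+// Hand-worked example: "cvPKc" scans the simple prefixes "PK", emits the base
+// type "char", then replays the prefixes in reverse, appending " const" for
+// 'K' and "*" for 'P', so the whole operator renders as "operator char
+// const*".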
+static bool ParseConversionOperatorType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ // Scan pointers, const, and other easy mangling prefixes with postfix
+ // demanglings. Remember the range of input for later rescanning.
+ //
+ // See `ParseType` and the `switch` below for the meaning of each char.
+ const char* begin_simple_prefixes = RemainingInput(state);
+ while (ParseCharClass(state, "OPRCGrVK")) {}
+ const char* end_simple_prefixes = RemainingInput(state);
+
+ // Emit the base type first.
+ if (!ParseType(state)) {
+ state->parse_state = copy;
+ return false;
+ }
+
+ // Then rescan the easy type combinators in reverse order to emit their
+ // demanglings in the expected output order.
+ while (begin_simple_prefixes != end_simple_prefixes) {
+ switch (*--end_simple_prefixes) {
+ case 'P':
+ MaybeAppend(state, "*");
+ break;
+ case 'R':
+ MaybeAppend(state, "&");
+ break;
+ case 'O':
+ MaybeAppend(state, "&&");
+ break;
+ case 'C':
+ MaybeAppend(state, " _Complex");
+ break;
+ case 'G':
+ MaybeAppend(state, " _Imaginary");
+ break;
+ case 'r':
+ MaybeAppend(state, " restrict");
+ break;
+ case 'V':
+ MaybeAppend(state, " volatile");
+ break;
+ case 'K':
+ MaybeAppend(state, " const");
+ break;
+ }
+ }
+ return true;
+}
+
// <special-name> ::= TV <type>
// ::= TT <type>
// ::= TI <type>
// ::= TS <type>
-// ::= TH <type> # thread-local
+// ::= TW <name> # thread-local wrapper
+// ::= TH <name> # thread-local initialization
// ::= Tc <call-offset> <call-offset> <(base) encoding>
// ::= GV <(object) name>
+// ::= GR <(object) name> [<seq-id>] _
// ::= T <call-offset> <(base) encoding>
+// ::= GTt <encoding> # transaction-safe entry point
+// ::= TA <template-arg> # nontype template parameter object
// G++ extensions:
// ::= TC <type> <(offset) number> _ <(base) type>
// ::= TF <type>
// ::= TJ <type>
-// ::= GR <name>
+// ::= GR <name> # without final _, perhaps an earlier form?
// ::= GA <encoding>
// ::= Th <call-offset> <(base) encoding>
// ::= Tv <call-offset> <(base) encoding>
//
-// Note: we don't care much about them since they don't appear in
-// stack traces. The are special data.
+// Note: Most of these are special data, not functions that occur in stack
+// traces. Exceptions are TW and TH, which denote functions supporting the
+// thread_local feature. For these see:
+//
+// https://maskray.me/blog/2021-02-14-all-about-thread-local-storage
+//
+// For TA see https://github.com/itanium-cxx-abi/cxx-abi/issues/63.
static bool ParseSpecialName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
- if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
+
+ if (ParseTwoCharToken(state, "TW")) {
+ MaybeAppend(state, "thread-local wrapper routine for ");
+ if (ParseName(state)) return true;
+ state->parse_state = copy;
+ return false;
+ }
+
+ if (ParseTwoCharToken(state, "TH")) {
+ MaybeAppend(state, "thread-local initialization routine for ");
+ if (ParseName(state)) return true;
+ state->parse_state = copy;
+ return false;
+ }
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
ParseType(state)) {
return true;
}
@@ -1064,21 +1312,51 @@ static bool ParseSpecialName(State *state) {
}
state->parse_state = copy;
- if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
+ // <special-name> ::= GR <(object) name> [<seq-id>] _ # modern standard
+ // ::= GR <(object) name> # also recognized
+ if (ParseTwoCharToken(state, "GR")) {
+ MaybeAppend(state, "reference temporary for ");
+ if (!ParseName(state)) {
+ state->parse_state = copy;
+ return false;
+ }
+ const bool has_seq_id = ParseSeqId(state);
+ const bool has_underscore = ParseOneCharToken(state, '_');
+ if (has_seq_id && !has_underscore) {
+ state->parse_state = copy;
+ return false;
+ }
return true;
}
- state->parse_state = copy;
if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
+ if (ParseThreeCharToken(state, "GTt") &&
+ MaybeAppend(state, "transaction clone for ") && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "TA")) {
+ bool append = state->parse_state.append;
+ DisableAppend(state);
+ if (ParseTemplateArg(state)) {
+ RestoreAppend(state, append);
+ MaybeAppend(state, "template parameter object");
+ return true;
+ }
+ }
+ state->parse_state = copy;
+
return false;
}
@@ -1182,7 +1460,6 @@ static bool ParseDecltype(State *state) {
// ::= O <type> # rvalue reference-to (C++0x)
// ::= C <type> # complex pair (C 2000)
// ::= G <type> # imaginary (C 2000)
-// ::= U <source-name> <type> # vendor extended type qualifier
// ::= <builtin-type>
// ::= <function-type>
// ::= <class-enum-type> # note: just an alias for <name>
@@ -1193,7 +1470,9 @@ static bool ParseDecltype(State *state) {
// ::= <decltype>
// ::= <substitution>
// ::= Dp <type> # pack expansion of (C++0x)
-// ::= Dv <num-elems> _ # GNU vector extension
+// ::= Dv <(elements) number> _ <type> # GNU vector extension
+// ::= Dv <(bytes) expression> _ <type>
+// ::= Dk <type-constraint> # constrained auto
//
static bool ParseType(State *state) {
ComplexityGuard guard(state);
@@ -1236,12 +1515,6 @@ static bool ParseType(State *state) {
}
state->parse_state = copy;
- if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
- ParseType(state)) {
- return true;
- }
- state->parse_state = copy;
-
if (ParseBuiltinType(state) || ParseFunctionType(state) ||
ParseClassEnumType(state) || ParseArrayType(state) ||
ParsePointerToMemberType(state) || ParseDecltype(state) ||
@@ -1260,54 +1533,160 @@ static bool ParseType(State *state) {
return true;
}
+ // GNU vector extension Dv <number> _ <type>
if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
- ParseOneCharToken(state, '_')) {
+ ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
state->parse_state = copy;
- return false;
+ // GNU vector extension Dv <expression> _ <type>
+ if (ParseTwoCharToken(state, "Dv") && ParseExpression(state) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Dk") && ParseTypeConstraint(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // For this notation see CXXNameMangler::mangleType in Clang's source code.
+ // The relevant logic and its comment "not clear how to mangle this!" date
+ // from 2011, so it may be with us awhile.
+ return ParseLongToken(state, "_SUBSTPACK_");
}
+// <qualifiers> ::= <extended-qualifier>* <CV-qualifiers>
// <CV-qualifiers> ::= [r] [V] [K]
+//
// We don't allow empty <CV-qualifiers> to avoid infinite loop in
// ParseType().
static bool ParseCVQualifiers(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
int num_cv_qualifiers = 0;
+ while (ParseExtendedQualifier(state)) ++num_cv_qualifiers;
num_cv_qualifiers += ParseOneCharToken(state, 'r');
num_cv_qualifiers += ParseOneCharToken(state, 'V');
num_cv_qualifiers += ParseOneCharToken(state, 'K');
return num_cv_qualifiers > 0;
}
+// <extended-qualifier> ::= U <source-name> [<template-args>]
+static bool ParseExtendedQualifier(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ if (!ParseOneCharToken(state, 'U')) return false;
+
+ bool append = state->parse_state.append;
+ DisableAppend(state);
+ if (!ParseSourceName(state)) {
+ state->parse_state = copy;
+ return false;
+ }
+ Optional(ParseTemplateArgs(state));
+ RestoreAppend(state, append);
+ return true;
+}
+
// <builtin-type> ::= v, etc. # single-character builtin types
-// ::= u <source-name>
+// ::= <vendor-extended-type>
// ::= Dd, etc. # two-character builtin types
+// ::= DB (<number> | <expression>) _ # _BitInt(N)
+// ::= DU (<number> | <expression>) _ # unsigned _BitInt(N)
+// ::= DF <number> _ # _FloatN (N bits)
+// ::= DF <number> x # _FloatNx
+// ::= DF16b # std::bfloat16_t
//
// Not supported:
-// ::= DF <number> _ # _FloatN (N bits)
-//
+// ::= [DS] DA <fixed-point-size>
+// ::= [DS] DR <fixed-point-size>
+// because real implementations of N1169 fixed-point are scant.
static bool ParseBuiltinType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
- const AbbrevPair *p;
- for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
+ ParseState copy = state->parse_state;
+
+ // DB (<number> | <expression>) _ # _BitInt(N)
+ // DU (<number> | <expression>) _ # unsigned _BitInt(N)
+ if (ParseTwoCharToken(state, "DB") ||
+ (ParseTwoCharToken(state, "DU") && MaybeAppend(state, "unsigned "))) {
+ bool append = state->parse_state.append;
+ DisableAppend(state);
+ int number = -1;
+ if (!ParseNumber(state, &number) && !ParseExpression(state)) {
+ state->parse_state = copy;
+ return false;
+ }
+ RestoreAppend(state, append);
+
+ if (!ParseOneCharToken(state, '_')) {
+ state->parse_state = copy;
+ return false;
+ }
+
+ MaybeAppend(state, "_BitInt(");
+ if (number >= 0) {
+ MaybeAppendDecimal(state, number);
+ } else {
+ MaybeAppend(state, "?"); // the best we can do for dependent sizes
+ }
+ MaybeAppend(state, ")");
+ return true;
+ }
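+ // Hand-worked examples: "DB32_" demangles to "_BitInt(32)" and "DU8_" to
+ // "unsigned _BitInt(8)".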
+
+ // DF <number> _ # _FloatN
+ // DF <number> x # _FloatNx
+ // DF16b # std::bfloat16_t
+ if (ParseTwoCharToken(state, "DF")) {
+ if (ParseThreeCharToken(state, "16b")) {
+ MaybeAppend(state, "std::bfloat16_t");
+ return true;
+ }
+ int number = 0;
+ if (!ParseNumber(state, &number)) {
+ state->parse_state = copy;
+ return false;
+ }
+ MaybeAppend(state, "_Float");
+ MaybeAppendDecimal(state, number);
+ if (ParseOneCharToken(state, 'x')) {
+ MaybeAppend(state, "x");
+ return true;
+ }
+ if (ParseOneCharToken(state, '_')) return true;
+ state->parse_state = copy;
+ return false;
+ }
+
+ for (const AbbrevPair *p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
// Guaranteed only 1- or 2-character strings in kBuiltinTypeList.
if (p->abbrev[1] == '\0') {
if (ParseOneCharToken(state, p->abbrev[0])) {
MaybeAppend(state, p->real_name);
- return true;
+ return true; // ::= v, etc. # single-character builtin types
}
} else if (p->abbrev[2] == '\0' && ParseTwoCharToken(state, p->abbrev)) {
MaybeAppend(state, p->real_name);
- return true;
+ return true; // ::= Dd, etc. # two-character builtin types
}
}
+ return ParseVendorExtendedType(state);
+}
+
+// <vendor-extended-type> ::= u <source-name> [<template-args>]
+static bool ParseVendorExtendedType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
ParseState copy = state->parse_state;
- if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
+ if (ParseOneCharToken(state, 'u') && ParseSourceName(state) &&
+ Optional(ParseTemplateArgs(state))) {
return true;
}
state->parse_state = copy;
@@ -1342,28 +1721,44 @@ static bool ParseExceptionSpec(State *state) {
return false;
}
-// <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
+// <function-type> ::=
+// [exception-spec] [Dx] F [Y] <bare-function-type> [<ref-qualifier>] E
+//
+// <ref-qualifier> ::= R | O
static bool ParseFunctionType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
- if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
- Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
- Optional(ParseOneCharToken(state, 'O')) &&
- ParseOneCharToken(state, 'E')) {
- return true;
+ Optional(ParseExceptionSpec(state));
+ Optional(ParseTwoCharToken(state, "Dx"));
+ if (!ParseOneCharToken(state, 'F')) {
+ state->parse_state = copy;
+ return false;
}
- state->parse_state = copy;
- return false;
+ Optional(ParseOneCharToken(state, 'Y'));
+ if (!ParseBareFunctionType(state)) {
+ state->parse_state = copy;
+ return false;
+ }
+ Optional(ParseCharClass(state, "RO"));
+ if (!ParseOneCharToken(state, 'E')) {
+ state->parse_state = copy;
+ return false;
+ }
+ return true;
}
-// <bare-function-type> ::= <(signature) type>+
+// <bare-function-type> ::= <overload-attribute>* <(signature) type>+
+//
+// The <overload-attribute>* prefix is nonstandard; see the comment on
+// ParseOverloadAttribute.
static bool ParseBareFunctionType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
DisableAppend(state);
- if (OneOrMore(ParseType, state)) {
+ if (ZeroOrMore(ParseOverloadAttribute, state) &&
+ OneOrMore(ParseType, state)) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "()");
return true;
@@ -1372,11 +1767,43 @@ static bool ParseBareFunctionType(State *state) {
return false;
}
+// <overload-attribute> ::= Ua <name>
+//
+// The nonstandard <overload-attribute> production is sufficient to accept the
+// current implementation of __attribute__((enable_if(condition, "message")))
+// and future attributes of a similar shape. See
+// https://clang.llvm.org/docs/AttributeReference.html#enable-if and the
+// definition of CXXNameMangler::mangleFunctionEncodingBareType in Clang's
+// source code.
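+//
+// Such an attribute is mangled roughly as "Ua9enable_if" followed by the
+// condition encoded as template-argument-like expressions (an observed Clang
+// shape, not an official production), which the Ua <name> rule above accepts.
+// The attribute is parsed but not printed.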
+static bool ParseOverloadAttribute(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "Ua") && ParseName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
// <class-enum-type> ::= <name>
+// ::= Ts <name> # struct Name or class Name
+// ::= Tu <name> # union Name
+// ::= Te <name> # enum Name
+//
+// See http://shortn/_W3YrltiEd0.
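+//
+// For example, "Ts3Foo" demangles to the same text as a plain "3Foo": the
+// elaborated-type keyword is accepted but not printed.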
static bool ParseClassEnumType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
- return ParseName(state);
+ ParseState copy = state->parse_state;
+ if (Optional(ParseTwoCharToken(state, "Ts") ||
+ ParseTwoCharToken(state, "Tu") ||
+ ParseTwoCharToken(state, "Te")) &&
+ ParseName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
}
// <array-type> ::= A <(positive dimension) number> _ <(element) type>
@@ -1413,21 +1840,83 @@ static bool ParsePointerToMemberType(State *state) {
// <template-param> ::= T_
// ::= T <parameter-2 non-negative number> _
+// ::= TL <level-1> __
+// ::= TL <level-1> _ <parameter-2 non-negative number> _
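+//
+// For example, "T_" names the first template parameter and "T0_" the second;
+// the TL forms add an explicit nesting level for parameters of enclosing
+// templates. All of these print as "?" since substitutions are not tracked.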
static bool ParseTemplateParam(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseTwoCharToken(state, "T_")) {
MaybeAppend(state, "?"); // We don't support template substitutions.
- return true;
+ return true; // ::= T_
}
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true; // ::= T <parameter-2 non-negative number> _
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "TL") && ParseNumber(state, nullptr)) {
+ if (ParseTwoCharToken(state, "__")) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true; // ::= TL <level-1> __
+ }
+
+ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true; // ::= TL <level-1> _ <parameter-2 non-negative number> _
+ }
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-param-decl>
+// ::= Ty # template type parameter
+// ::= Tk <concept name> [<template-args>] # constrained type parameter
+// ::= Tn <type> # template non-type parameter
+// ::= Tt <template-param-decl>* E # template template parameter
+// ::= Tp <template-param-decl> # template parameter pack
+//
+// NOTE: <concept name> is just a <name>: http://shortn/_MqJVyr0fc1
+// TODO(b/324066279): Implement optional suffix for `Tt`:
+// [Q <requires-clause expr>]
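+//
+// For example, "Ty" encodes a plain type parameter, "Tni" a non-type
+// parameter of type int, and "TpTy" a pack of type parameters.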
+static bool ParseTemplateParamDecl(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ if (ParseTwoCharToken(state, "Ty")) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tk") && ParseName(state) &&
+ Optional(ParseTemplateArgs(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tn") && ParseType(state)) {
return true;
}
state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tt") &&
+ ZeroOrMore(ParseTemplateParamDecl, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tp") && ParseTemplateParamDecl(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
@@ -1441,13 +1930,14 @@ static bool ParseTemplateTemplateParam(State *state) {
ParseSubstitution(state, /*accept_std=*/false));
}
-// <template-args> ::= I <template-arg>+ E
+// <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E
static bool ParseTemplateArgs(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
DisableAppend(state);
if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+ Optional(ParseQRequiresClauseExpr(state)) &&
ParseOneCharToken(state, 'E')) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "<>");
@@ -1457,7 +1947,8 @@ static bool ParseTemplateArgs(State *state) {
return false;
}
-// <template-arg> ::= <type>
+// <template-arg> ::= <template-param-decl> <template-arg>
+// ::= <type>
// ::= <expr-primary>
// ::= J <template-arg>* E # argument pack
// ::= X <expression> E
@@ -1541,7 +2032,7 @@ static bool ParseTemplateArg(State *state) {
// ::= L <source-name> [<template-args>] [<expr-cast-value> E]
if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
copy = state->parse_state;
- if (ParseExprCastValue(state) && ParseOneCharToken(state, 'E')) {
+ if (ParseExprCastValueAndTrailingE(state)) {
return true;
}
state->parse_state = copy;
@@ -1560,6 +2051,12 @@ static bool ParseTemplateArg(State *state) {
return true;
}
state->parse_state = copy;
+
+ if (ParseTemplateParamDecl(state) && ParseTemplateArg(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
@@ -1614,6 +2111,13 @@ static bool ParseBaseUnresolvedName(State *state) {
// <base-unresolved-name>
// ::= [gs] sr <unresolved-qualifier-level>+ E
// <base-unresolved-name>
+// ::= sr St <simple-id> <simple-id> # nonstandard
+//
+// The last case is not part of the official grammar but has been observed in
+// real-world examples that the GNU demangler (but not the LLVM demangler) is
+// able to decode; see demangle_test.cc for one such symbol name. The shape
+// sr St <simple-id> <simple-id> was inferred by closed-box testing of the GNU
+// demangler.
static bool ParseUnresolvedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
@@ -1633,7 +2137,7 @@ static bool ParseUnresolvedName(State *state) {
if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
ParseUnresolvedType(state) &&
- OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ OneOrMore(ParseUnresolvedQualifierLevel, state) &&
ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
return true;
}
@@ -1641,35 +2145,160 @@ static bool ParseUnresolvedName(State *state) {
if (Optional(ParseTwoCharToken(state, "gs")) &&
ParseTwoCharToken(state, "sr") &&
- OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ OneOrMore(ParseUnresolvedQualifierLevel, state) &&
ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
+ if (ParseTwoCharToken(state, "sr") && ParseTwoCharToken(state, "St") &&
+ ParseSimpleId(state) && ParseSimpleId(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
+// <unresolved-qualifier-level> ::= <simple-id>
+// ::= <substitution> <template-args>
+//
+// The production <substitution> <template-args> is nonstandard but is observed
+// in practice. An upstream discussion on the best shape of <unresolved-name>
+// has not converged:
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/38
+static bool ParseUnresolvedQualifierLevel(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ if (ParseSimpleId(state)) return true;
+
+ ParseState copy = state->parse_state;
+ if (ParseSubstitution(state, /*accept_std=*/false) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <union-selector> ::= _ [<number>]
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/47
+static bool ParseUnionSelector(State *state) {
+ return ParseOneCharToken(state, '_') && Optional(ParseNumber(state, nullptr));
+}
+
+// <function-param> ::= fp <(top-level) CV-qualifiers> _
+// ::= fp <(top-level) CV-qualifiers> <number> _
+// ::= fL <number> p <(top-level) CV-qualifiers> _
+// ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+// ::= fpT # this
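+//
+// For example, "fp_" denotes the first function parameter, "fp0_" the second,
+// and "fpT" the implicit this; the fL forms name parameters of enclosing
+// function prototype scopes.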
+static bool ParseFunctionParam(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+
+ // Function-param expression (level 0).
+ if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 1+).
+ if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
+ ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return ParseThreeCharToken(state, "fpT");
+}
+
+// <braced-expression> ::= <expression>
+// ::= di <field source-name> <braced-expression>
+// ::= dx <index expression> <braced-expression>
+// ::= dX <expression> <expression> <braced-expression>
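+//
+// The di, dx, and dX forms correspond to designated initializers:
+// .field = expr, [index] = expr, and the GNU range extension
+// [lo ... hi] = expr, respectively.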
+static bool ParseBracedExpression(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+
+ if (ParseTwoCharToken(state, "di") && ParseSourceName(state) &&
+ ParseBracedExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "dx") && ParseExpression(state) &&
+ ParseBracedExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "dX") &&
+ ParseExpression(state) && ParseExpression(state) &&
+ ParseBracedExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return ParseExpression(state);
+}
+
// <expression> ::= <1-ary operator-name> <expression>
// ::= <2-ary operator-name> <expression> <expression>
// ::= <3-ary operator-name> <expression> <expression> <expression>
+// ::= pp_ <expression> # ++e; pp <expression> is e++
+// ::= mm_ <expression> # --e; mm <expression> is e--
// ::= cl <expression>+ E
// ::= cp <simple-id> <expression>* E # Clang-specific.
+// ::= so <type> <expression> [<number>] <union-selector>* [p] E
// ::= cv <type> <expression> # type (expression)
// ::= cv <type> _ <expression>* E # type (expr-list)
+// ::= tl <type> <braced-expression>* E
+// ::= il <braced-expression>* E
+// ::= [gs] nw <expression>* _ <type> E
+// ::= [gs] nw <expression>* _ <type> <initializer>
+// ::= [gs] na <expression>* _ <type> E
+// ::= [gs] na <expression>* _ <type> <initializer>
+// ::= [gs] dl <expression>
+// ::= [gs] da <expression>
+// ::= dc <type> <expression>
+// ::= sc <type> <expression>
+// ::= cc <type> <expression>
+// ::= rc <type> <expression>
+// ::= ti <type>
+// ::= te <expression>
// ::= st <type>
+// ::= at <type>
+// ::= az <expression>
+// ::= nx <expression>
// ::= <template-param>
// ::= <function-param>
+// ::= sZ <template-param>
+// ::= sZ <function-param>
+// ::= sP <template-arg>* E
// ::= <expr-primary>
// ::= dt <expression> <unresolved-name> # expr.name
// ::= pt <expression> <unresolved-name> # expr->name
// ::= sp <expression> # argument pack expansion
+// ::= fl <binary operator-name> <expression>
+// ::= fr <binary operator-name> <expression>
+// ::= fL <binary operator-name> <expression> <expression>
+// ::= fR <binary operator-name> <expression> <expression>
+// ::= tw <expression>
+// ::= tr
// ::= sr <type> <unqualified-name> <template-args>
// ::= sr <type> <unqualified-name>
-// <function-param> ::= fp <(top-level) CV-qualifiers> _
-// ::= fp <(top-level) CV-qualifiers> <number> _
-// ::= fL <number> p <(top-level) CV-qualifiers> _
-// ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+// ::= u <source-name> <template-arg>* E # vendor extension
+// ::= rq <requirement>+ E
+// ::= rQ <bare-function-type> _ <requirement>+ E
static bool ParseExpression(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
@@ -1686,6 +2315,15 @@ static bool ParseExpression(State *state) {
}
state->parse_state = copy;
+ // Preincrement and predecrement. Postincrement and postdecrement are handled
+ // by the operator-name logic later on.
+ if ((ParseThreeCharToken(state, "pp_") ||
+ ParseThreeCharToken(state, "mm_")) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
// Clang-specific "cp <simple-id> <expression>* E"
// https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338
if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
@@ -1694,17 +2332,65 @@ static bool ParseExpression(State *state) {
}
state->parse_state = copy;
- // Function-param expression (level 0).
- if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
- Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ // <expression> ::= so <type> <expression> [<number>] <union-selector>* [p] E
+ //
+ // https://github.com/itanium-cxx-abi/cxx-abi/issues/47
+ if (ParseTwoCharToken(state, "so") && ParseType(state) &&
+ ParseExpression(state) && Optional(ParseNumber(state, nullptr)) &&
+ ZeroOrMore(ParseUnionSelector, state) &&
+ Optional(ParseOneCharToken(state, 'p')) &&
+ ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
- // Function-param expression (level 1+).
- if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
- ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
- Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ // <expression> ::= <function-param>
+ if (ParseFunctionParam(state)) return true;
+ state->parse_state = copy;
+
+ // <expression> ::= tl <type> <braced-expression>* E
+ if (ParseTwoCharToken(state, "tl") && ParseType(state) &&
+ ZeroOrMore(ParseBracedExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // <expression> ::= il <braced-expression>* E
+ if (ParseTwoCharToken(state, "il") &&
+ ZeroOrMore(ParseBracedExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // <expression> ::= [gs] nw <expression>* _ <type> E
+ // ::= [gs] nw <expression>* _ <type> <initializer>
+ // ::= [gs] na <expression>* _ <type> E
+ // ::= [gs] na <expression>* _ <type> <initializer>
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ (ParseTwoCharToken(state, "nw") || ParseTwoCharToken(state, "na")) &&
+ ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, '_') &&
+ ParseType(state) &&
+ (ParseOneCharToken(state, 'E') || ParseInitializer(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // <expression> ::= [gs] dl <expression>
+ // ::= [gs] da <expression>
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ (ParseTwoCharToken(state, "dl") || ParseTwoCharToken(state, "da")) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // dynamic_cast, static_cast, const_cast, reinterpret_cast.
+ //
+ // <expression> ::= (dc | sc | cc | rc) <type> <expression>
+ if (ParseCharClass(state, "dscr") && ParseOneCharToken(state, 'c') &&
+ ParseType(state) && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
@@ -1746,15 +2432,96 @@ static bool ParseExpression(State *state) {
}
state->parse_state = copy;
+ // typeid(type)
+ if (ParseTwoCharToken(state, "ti") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // typeid(expression)
+ if (ParseTwoCharToken(state, "te") && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
// sizeof type
if (ParseTwoCharToken(state, "st") && ParseType(state)) {
return true;
}
state->parse_state = copy;
+ // alignof(type)
+ if (ParseTwoCharToken(state, "at") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // alignof(expression), a GNU extension
+ if (ParseTwoCharToken(state, "az") && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // noexcept(expression) appearing as an expression in a dependent signature
+ if (ParseTwoCharToken(state, "nx") && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // sizeof...(pack)
+ //
+ // <expression> ::= sZ <template-param>
+ // ::= sZ <function-param>
+ if (ParseTwoCharToken(state, "sZ") &&
+ (ParseFunctionParam(state) || ParseTemplateParam(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // sizeof...(pack) captured from an alias template
+ //
+ // <expression> ::= sP <template-arg>* E
+ if (ParseTwoCharToken(state, "sP") && ZeroOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Unary folds (... op pack) and (pack op ...).
+ //
+ // <expression> ::= fl <binary operator-name> <expression>
+ // ::= fr <binary operator-name> <expression>
+ if ((ParseTwoCharToken(state, "fl") || ParseTwoCharToken(state, "fr")) &&
+ ParseOperatorName(state, nullptr) && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Binary folds (init op ... op pack) and (pack op ... op init).
+ //
+ // <expression> ::= fL <binary operator-name> <expression> <expression>
+ // ::= fR <binary operator-name> <expression> <expression>
+ if ((ParseTwoCharToken(state, "fL") || ParseTwoCharToken(state, "fR")) &&
+ ParseOperatorName(state, nullptr) && ParseExpression(state) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // tw <expression>: throw e
+ if (ParseTwoCharToken(state, "tw") && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // tr: throw (rethrows an exception from the handler that caught it)
+ if (ParseTwoCharToken(state, "tr")) return true;
+
// Object and pointer member access expressions.
+ //
+ // <expression> ::= (dt | pt) <expression> <unresolved-name>
if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
- ParseExpression(state) && ParseType(state)) {
+ ParseExpression(state) && ParseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
@@ -1774,9 +2541,61 @@ static bool ParseExpression(State *state) {
}
state->parse_state = copy;
+ // Vendor-extended expressions: u <source-name> <template-arg>* E
+ if (ParseOneCharToken(state, 'u') && ParseSourceName(state) &&
+ ZeroOrMore(ParseTemplateArg, state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // <expression> ::= rq <requirement>+ E
+ //
+ // https://github.com/itanium-cxx-abi/cxx-abi/issues/24
+ if (ParseTwoCharToken(state, "rq") && OneOrMore(ParseRequirement, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // <expression> ::= rQ <bare-function-type> _ <requirement>+ E
+ //
+ // https://github.com/itanium-cxx-abi/cxx-abi/issues/24
+ if (ParseTwoCharToken(state, "rQ") && ParseBareFunctionType(state) &&
+ ParseOneCharToken(state, '_') && OneOrMore(ParseRequirement, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
return ParseUnresolvedName(state);
}
+// <initializer> ::= pi <expression>* E
+// ::= il <braced-expression>* E
+//
+// The il ... E form is not in the ABI spec but is seen in practice for
+// braced-init-lists in new-expressions, which are standard syntax from C++11
+// on.
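+//
+// For example, "new int(42)" carries a pi-form initializer, while the il form
+// covers braced initialization such as "new int{42}".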
+static bool ParseInitializer(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ if (ParseTwoCharToken(state, "pi") && ZeroOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "il") &&
+ ZeroOrMore(ParseBracedExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
// <expr-primary> ::= L <type> <(value) number> E
// ::= L <type> <(value) float> E
// ::= L <mangled-name> E
@@ -1819,10 +2638,35 @@ static bool ParseExprPrimary(State *state) {
return false;
}
- // The merged cast production.
- if (ParseOneCharToken(state, 'L') && ParseType(state) &&
- ParseExprCastValue(state)) {
- return true;
+ if (ParseOneCharToken(state, 'L')) {
+ // There are two special cases in which a literal may or must contain a type
+ // without a value. The first is that both LDnE and LDn0E are valid
+ // encodings of nullptr, used in different situations. Recognize LDnE here,
+ // leaving LDn0E to be recognized by the general logic afterward.
+ if (ParseThreeCharToken(state, "DnE")) return true;
+
+ // The second special case is a string literal, currently mangled in C++98
+ // style as LA<length + 1>_KcE. This is inadequate to support C++11 and
+ // later versions, and the discussion of this problem has not converged.
+ //
+ // https://github.com/itanium-cxx-abi/cxx-abi/issues/64
+ //
+ // For now the bare-type mangling is what's used in practice, so we
+ // recognize this form and only this form if an array type appears here.
+ // Someday we'll probably have to accept a new form of value mangling in
+ // LA...E constructs. (Note also that C++20 allows a wide range of
+ // class-type objects as template arguments, so someday their values will be
+ // mangled and we'll have to recognize them here too.)
+ if (RemainingInput(state)[0] == 'A' /* an array type follows */) {
+ if (ParseType(state) && ParseOneCharToken(state, 'E')) return true;
+ state->parse_state = copy;
+ return false;
+ }
+
+ // The merged cast production.
+ if (ParseType(state) && ParseExprCastValueAndTrailingE(state)) {
+ return true;
+ }
}
state->parse_state = copy;
@@ -1836,7 +2680,7 @@ static bool ParseExprPrimary(State *state) {
}
// <number> or <float>, followed by 'E', as described above ParseExprPrimary.
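+// For example, "42E" encodes an integer value, "3f800000E" a hex-encoded
+// float, and "3f800000_00000000E" a complex pair of floats, matching the
+// cases below.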
-static bool ParseExprCastValue(State *state) {
+static bool ParseExprCastValueAndTrailingE(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
// We have to be able to backtrack after accepting a number because we could
@@ -1848,39 +2692,148 @@ static bool ParseExprCastValue(State *state) {
}
state->parse_state = copy;
- if (ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
+ if (ParseFloatNumber(state)) {
+ // <float> for ordinary floating-point types
+ if (ParseOneCharToken(state, 'E')) return true;
+
+ // <float> _ <float> for complex floating-point types
+ if (ParseOneCharToken(state, '_') && ParseFloatNumber(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// Parses `Q <requires-clause expr>`.
+// If parsing fails, applies backtracking to `state`.
+//
+// This function covers two symbols instead of one for convenience,
+// because in LLVM's Itanium ABI mangling grammar, <requires-clause expr>
+// always appears after Q.
+//
+// Does not emit the parsed `requires` clause to simplify the implementation.
+// In other words, these two functions' mangled names will demangle identically:
+//
+// template <typename T>
+// int foo(T) requires IsIntegral<T>;
+//
+// vs.
+//
+// template <typename T>
+// int foo(T);
+static bool ParseQRequiresClauseExpr(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+
+ // <requires-clause expr> is just an <expression>: http://shortn/_9E1Ul0rIM8
+ if (ParseOneCharToken(state, 'Q') && ParseExpression(state)) {
+ RestoreAppend(state, copy.append);
+ return true;
+ }
+
+ // This also restores the append flag saved in copy.
+ state->parse_state = copy;
+ return false;
+}
+
+// <requirement> ::= X <expression> [N] [R <type-constraint>]
+// <requirement> ::= T <type>
+// <requirement> ::= Q <constraint-expression>
+//
+// <constraint-expression> ::= <expression>
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/24
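+//
+// These correspond to the kinds of requirement in a requires-expression:
+// X is an expression requirement (N marks noexcept, R a return-type
+// constraint), T is a type requirement, and Q is a nested requirement.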
+static bool ParseRequirement(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+
+ if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+ Optional(ParseOneCharToken(state, 'N')) &&
+ // This logic backtracks cleanly if we eat an R but a valid type doesn't
+ // follow it.
+ (!ParseOneCharToken(state, 'R') || ParseTypeConstraint(state))) {
return true;
}
state->parse_state = copy;
+ if (ParseOneCharToken(state, 'T') && ParseType(state)) return true;
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'Q') && ParseExpression(state)) return true;
+ state->parse_state = copy;
+
return false;
}
+// <type-constraint> ::= <name>
+static bool ParseTypeConstraint(State *state) {
+ return ParseName(state);
+}
+
// <local-name> ::= Z <(function) encoding> E <(entity) name> [<discriminator>]
// ::= Z <(function) encoding> E s [<discriminator>]
+// ::= Z <(function) encoding> E d [<(parameter) number>] _ <name>
//
// Parsing a common prefix of these two productions together avoids an
// exponential blowup of backtracking. Parse like:
// <local-name> := Z <encoding> E <local-name-suffix>
// <local-name-suffix> ::= s [<discriminator>]
+// ::= d [<(parameter) number>] _ <name>
// ::= <name> [<discriminator>]
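+//
+// In the d form, "d_" renders as ::{default arg#1}:: and "d0_" as
+// ::{default arg#2}::; the parser below adds 2 to the encoded number, which
+// defaults to -1 when omitted.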
static bool ParseLocalNameSuffix(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ // <local-name-suffix> ::= d [<(parameter) number>] _ <name>
+ if (ParseOneCharToken(state, 'd') &&
+ (IsDigit(RemainingInput(state)[0]) || RemainingInput(state)[0] == '_')) {
+ int number = -1;
+ Optional(ParseNumber(state, &number));
+ if (number < -1 || number > 2147483645) {
+ // Work around overflow cases. We do not expect these outside of a fuzzer
+ // or other source of adversarial input. If we do detect overflow here,
+ // we'll print {default arg#1}.
+ number = -1;
+ }
+ number += 2;
+
+ // The ::{default arg#1}:: infix must be rendered before the lambda itself,
+ // so print this before parsing the rest of the <local-name-suffix>.
+ MaybeAppend(state, "::{default arg#");
+ MaybeAppendDecimal(state, number);
+ MaybeAppend(state, "}::");
+ if (ParseOneCharToken(state, '_') && ParseName(state)) return true;
+
+ // On late parse failure, roll back not only the input but also the output,
+ // whose trailing NUL was overwritten.
+ state->parse_state = copy;
+ if (state->parse_state.append) {
+ state->out[state->parse_state.out_cur_idx] = '\0';
+ }
+ return false;
+ }
+ state->parse_state = copy;
+ // <local-name-suffix> ::= <name> [<discriminator>]
if (MaybeAppend(state, "::") && ParseName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
-
- // Since we're not going to overwrite the above "::" by re-parsing the
- // <encoding> (whose trailing '\0' byte was in the byte now holding the
- // first ':'), we have to rollback the "::" if the <name> parse failed.
+ state->parse_state = copy;
if (state->parse_state.append) {
- state->out[state->parse_state.out_cur_idx - 2] = '\0';
+ state->out[state->parse_state.out_cur_idx] = '\0';
}
+ // <local-name-suffix> ::= s [<discriminator>]
return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
}
@@ -1896,12 +2849,22 @@ static bool ParseLocalName(State *state) {
return false;
}
-// <discriminator> := _ <(non-negative) number>
+// <discriminator> := _ <digit>
+// := __ <number (>= 10)> _
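+//
+// For example, "_4" encodes discriminator 4 and "__12_" encodes
+// discriminator 12.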
static bool ParseDiscriminator(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
- if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
+
+ // Both forms start with _ so parse that first.
+ if (!ParseOneCharToken(state, '_')) return false;
+
+ // <digit>
+ if (ParseDigit(state, nullptr)) return true;
+
+ // _ <number> _
+ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
@@ -1947,6 +2910,7 @@ static bool ParseSubstitution(State *state, bool accept_std) {
MaybeAppend(state, p->real_name);
}
++state->parse_state.mangled_idx;
+ UpdateHighWaterMark(state);
return true;
}
}
@@ -1972,10 +2936,13 @@ static bool ParseTopLevelMangledName(State *state) {
MaybeAppend(state, RemainingInput(state));
return true;
}
+ ReportHighWaterMark(state);
return false; // Unconsumed suffix.
}
return true;
}
+
+ ReportHighWaterMark(state);
return false;
}
@@ -1985,6 +2952,10 @@ static bool Overflowed(const State *state) {
// The demangler entry point.
bool Demangle(const char* mangled, char* out, size_t out_size) {
+ if (mangled[0] == '_' && mangled[1] == 'R') {
+ return DemangleRustSymbolEncoding(mangled, out, out_size);
+ }
+
State state;
InitState(&state, mangled, out, out_size);
return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.h
index 146d1150ef..cb0aba13e5 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle.h
@@ -56,6 +56,9 @@ namespace debugging_internal {
//
// See the unit test for more examples.
//
+// Demangle also recognizes Rust mangled names by delegating the parsing of
+// anything that starts with _R to DemangleRustSymbolEncoding (demangle_rust.h).
+//
// Note: we might want to write demanglers for ABIs other than Itanium
// C++ ABI in the future.
bool Demangle(const char* mangled, char* out, size_t out_size);
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc
new file mode 100644
index 0000000000..4309bd849a
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.cc
@@ -0,0 +1,925 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/demangle_rust.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/debugging/internal/decode_rust_punycode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+namespace {
+
+// Same step limit as the C++ demangler in demangle.cc uses.
+constexpr int kMaxReturns = 1 << 17;
+
+bool IsDigit(char c) { return '0' <= c && c <= '9'; }
+bool IsLower(char c) { return 'a' <= c && c <= 'z'; }
+bool IsUpper(char c) { return 'A' <= c && c <= 'Z'; }
+bool IsAlpha(char c) { return IsLower(c) || IsUpper(c); }
+bool IsIdentifierChar(char c) { return IsAlpha(c) || IsDigit(c) || c == '_'; }
+bool IsLowerHexDigit(char c) { return IsDigit(c) || ('a' <= c && c <= 'f'); }
+
+const char* BasicTypeName(char c) {
+ switch (c) {
+ case 'a': return "i8";
+ case 'b': return "bool";
+ case 'c': return "char";
+ case 'd': return "f64";
+ case 'e': return "str";
+ case 'f': return "f32";
+ case 'h': return "u8";
+ case 'i': return "isize";
+ case 'j': return "usize";
+ case 'l': return "i32";
+ case 'm': return "u32";
+ case 'n': return "i128";
+ case 'o': return "u128";
+ case 'p': return "_";
+ case 's': return "i16";
+ case 't': return "u16";
+ case 'u': return "()";
+ case 'v': return "...";
+ case 'x': return "i64";
+ case 'y': return "u64";
+ case 'z': return "!";
+ }
+ return nullptr;
+}
+
+// Parser for Rust symbol mangling v0, whose grammar is defined here:
+//
+// https://doc.rust-lang.org/rustc/symbol-mangling/v0.html#symbol-grammar-summary
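+//
+// Expected use is a single Parse call on a temporary, along the lines of this
+// sketch (names assumed from the constructor below):
+//
+// RustSymbolParser(encoding, out, out + out_size).Parse()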
+class RustSymbolParser {
+ public:
+ // Prepares to demangle the given encoding, a Rust symbol name starting with
+ // _R, into the output buffer [out, out_end). The caller is expected to
+ // continue by calling the new object's Parse function.
+ RustSymbolParser(const char* encoding, char* out, char* const out_end)
+ : encoding_(encoding), out_(out), out_end_(out_end) {
+ if (out_ != out_end_) *out_ = '\0';
+ }
+
+ // Parses the constructor's encoding argument, writing output into the range
+ // [out, out_end). Returns true on success and false for input whose
+ // structure was not recognized or exceeded implementation limits, such as by
+ // nesting structures too deep. In either case *this should not be used
+ // again.
+ ABSL_MUST_USE_RESULT bool Parse() && {
+ // Recursively parses the grammar production named by callee, then resumes
+ // execution at the next statement.
+ //
+ // Recursive-descent parsing is a beautifully readable translation of a
+ // grammar, but it risks stack overflow if implemented by naive recursion on
+ // the C++ call stack. So we simulate recursion by goto and switch instead,
+ // keeping a bounded stack of "return addresses" in the recursion_stack_
+ // member.
+ //
+ // The callee argument is a statement label. We goto that label after
+ // saving the "return address" on recursion_stack_. The next continue
+ // statement in the for loop below "returns" from this "call".
+ //
+ // The caller argument names the return point. Each value of caller must
+ // appear in only one ABSL_DEMANGLER_RECURSE call and be listed in the
+ // definition of enum ReturnAddress. The switch implements the control
+ // transfer from the end of a "called" subroutine back to the statement
+ // after the "call".
+ //
+ // Note that not all the grammar productions have to be packed into the
+ // switch, but only those which appear in a cycle in the grammar. Anything
+ // acyclic can be written as ordinary functions and function calls, e.g.,
+ // ParseIdentifier.
+#define ABSL_DEMANGLER_RECURSE(callee, caller) \
+ do { \
+ if (recursion_depth_ == kStackSize) return false; \
+ /* The next continue will switch on this saved value ... */ \
+ recursion_stack_[recursion_depth_++] = caller; \
+ goto callee; \
+ /* ... and will land here, resuming the suspended code. */ \
+ case caller: {} \
+ } while (0)
+
+ // Parse the encoding, counting completed recursive calls to guard against
+ // excessively complex input and infinite-loop bugs.
+ int iter = 0;
+ goto whole_encoding;
+ for (; iter < kMaxReturns && recursion_depth_ > 0; ++iter) {
+ // This switch resumes the code path most recently suspended by
+ // ABSL_DEMANGLER_RECURSE.
+ switch (recursion_stack_[--recursion_depth_]) {
+ //
+ // symbol-name ->
+ // _R decimal-number? path instantiating-crate? vendor-specific-suffix?
+ whole_encoding:
+ if (!Eat('_') || !Eat('R')) return false;
+ // decimal-number? is always empty today, so proceed to path, which
+ // can't start with a decimal digit.
+ ABSL_DEMANGLER_RECURSE(path, kInstantiatingCrate);
+ if (IsAlpha(Peek())) {
+ ++silence_depth_; // Print nothing more from here on.
+ ABSL_DEMANGLER_RECURSE(path, kVendorSpecificSuffix);
+ }
+ switch (Take()) {
+ case '.': case '$': case '\0': return true;
+ }
+ return false; // unexpected trailing content
+
+ // path -> crate-root | inherent-impl | trait-impl | trait-definition |
+ // nested-path | generic-args | backref
+ //
+ // Note that ABSL_DEMANGLER_RECURSE does not work inside a nested switch
+ // (which would hide the generated case label). Thus we jump out of the
+ // inner switch with gotos before performing any fake recursion.
+ path:
+ switch (Take()) {
+ case 'C': goto crate_root;
+ case 'M': goto inherent_impl;
+ case 'X': goto trait_impl;
+ case 'Y': goto trait_definition;
+ case 'N': goto nested_path;
+ case 'I': goto generic_args;
+ case 'B': goto path_backref;
+ default: return false;
+ }
+
+ // crate-root -> C identifier (C consumed above)
+ crate_root:
+ if (!ParseIdentifier()) return false;
+ continue;
+
+ // inherent-impl -> M impl-path type (M already consumed)
+ inherent_impl:
+ if (!Emit("<")) return false;
+ ABSL_DEMANGLER_RECURSE(impl_path, kInherentImplType);
+ ABSL_DEMANGLER_RECURSE(type, kInherentImplEnding);
+ if (!Emit(">")) return false;
+ continue;
+
+ // trait-impl -> X impl-path type path (X already consumed)
+ trait_impl:
+ if (!Emit("<")) return false;
+ ABSL_DEMANGLER_RECURSE(impl_path, kTraitImplType);
+ ABSL_DEMANGLER_RECURSE(type, kTraitImplInfix);
+ if (!Emit(" as ")) return false;
+ ABSL_DEMANGLER_RECURSE(path, kTraitImplEnding);
+ if (!Emit(">")) return false;
+ continue;
+
+ // impl-path -> disambiguator? path (but never print it!)
+ impl_path:
+ ++silence_depth_;
+ {
+ int ignored_disambiguator;
+ if (!ParseDisambiguator(ignored_disambiguator)) return false;
+ }
+ ABSL_DEMANGLER_RECURSE(path, kImplPathEnding);
+ --silence_depth_;
+ continue;
+
+ // trait-definition -> Y type path (Y already consumed)
+ trait_definition:
+ if (!Emit("<")) return false;
+ ABSL_DEMANGLER_RECURSE(type, kTraitDefinitionInfix);
+ if (!Emit(" as ")) return false;
+ ABSL_DEMANGLER_RECURSE(path, kTraitDefinitionEnding);
+ if (!Emit(">")) return false;
+ continue;
+
+ // nested-path -> N namespace path identifier (N already consumed)
+ // namespace -> lower | upper
+ nested_path:
+ // Uppercase namespaces must be saved on a stack so we can print
+ // ::{closure#0} or ::{shim:vtable#0} or ::{X:name#0} as needed.
+ if (IsUpper(Peek())) {
+ if (!PushNamespace(Take())) return false;
+ ABSL_DEMANGLER_RECURSE(path, kIdentifierInUppercaseNamespace);
+ if (!Emit("::")) return false;
+ if (!ParseIdentifier(PopNamespace())) return false;
+ continue;
+ }
+
+ // Lowercase namespaces, however, are never represented in the output;
+ // they all emit just ::name.
+ if (IsLower(Take())) {
+ ABSL_DEMANGLER_RECURSE(path, kIdentifierInLowercaseNamespace);
+ if (!Emit("::")) return false;
+ if (!ParseIdentifier()) return false;
+ continue;
+ }
+
+ // Neither upper nor lower.
+ return false;
+
+ // type -> basic-type | array-type | slice-type | tuple-type |
+ // ref-type | mut-ref-type | const-ptr-type | mut-ptr-type |
+ // fn-type | dyn-trait-type | path | backref
+ //
+ // We use ifs instead of switch (Take()) because the default case jumps
+ // to path, which will need to see the first character not yet Taken
+ // from the input. Because we do not use a nested switch here,
+ // ABSL_DEMANGLER_RECURSE works fine in the 'S' case.
+ type:
+ if (IsLower(Peek())) {
+ const char* type_name = BasicTypeName(Take());
+ if (type_name == nullptr || !Emit(type_name)) return false;
+ continue;
+ }
+ if (Eat('A')) {
+ // array-type = A type const
+ if (!Emit("[")) return false;
+ ABSL_DEMANGLER_RECURSE(type, kArraySize);
+ if (!Emit("; ")) return false;
+ ABSL_DEMANGLER_RECURSE(constant, kFinishArray);
+ if (!Emit("]")) return false;
+ continue;
+ }
+ if (Eat('S')) {
+ if (!Emit("[")) return false;
+ ABSL_DEMANGLER_RECURSE(type, kSliceEnding);
+ if (!Emit("]")) return false;
+ continue;
+ }
+ if (Eat('T')) goto tuple_type;
+ if (Eat('R')) {
+ if (!Emit("&")) return false;
+ if (!ParseOptionalLifetime()) return false;
+ goto type;
+ }
+ if (Eat('Q')) {
+ if (!Emit("&mut ")) return false;
+ if (!ParseOptionalLifetime()) return false;
+ goto type;
+ }
+ if (Eat('P')) {
+ if (!Emit("*const ")) return false;
+ goto type;
+ }
+ if (Eat('O')) {
+ if (!Emit("*mut ")) return false;
+ goto type;
+ }
+ if (Eat('F')) goto fn_type;
+ if (Eat('D')) goto dyn_trait_type;
+ if (Eat('B')) goto type_backref;
+ goto path;
+
+ // tuple-type -> T type* E (T already consumed)
+ tuple_type:
+ if (!Emit("(")) return false;
+
+ // The toolchain should encode the unit type as u rather than TE, but the
+ // grammar and other demanglers also recognize TE, so we do too.
+ if (Eat('E')) {
+ if (!Emit(")")) return false;
+ continue;
+ }
+
+ // A tuple with one element is rendered (type,) instead of (type).
+ ABSL_DEMANGLER_RECURSE(type, kAfterFirstTupleElement);
+ if (Eat('E')) {
+ if (!Emit(",)")) return false;
+ continue;
+ }
+
+ // A tuple with two elements is of course (x, y).
+ if (!Emit(", ")) return false;
+ ABSL_DEMANGLER_RECURSE(type, kAfterSecondTupleElement);
+ if (Eat('E')) {
+ if (!Emit(")")) return false;
+ continue;
+ }
+
+ // And (x, y, z) for three elements.
+ if (!Emit(", ")) return false;
+ ABSL_DEMANGLER_RECURSE(type, kAfterThirdTupleElement);
+ if (Eat('E')) {
+ if (!Emit(")")) return false;
+ continue;
+ }
+
+ // For longer tuples we write (x, y, z, ...), printing none of the
+ // content of the fourth and later types. Thus we avoid exhausting
+ // output buffers and human readers' patience when some library has a
+ // long tuple as an implementation detail, without having to
+ // completely obfuscate all tuples.
+ if (!Emit(", ...)")) return false;
+ ++silence_depth_;
+ while (!Eat('E')) {
+ ABSL_DEMANGLER_RECURSE(type, kAfterSubsequentTupleElement);
+ }
+ --silence_depth_;
+ continue;
+
+ // fn-type -> F fn-sig (F already consumed)
+ // fn-sig -> binder? U? (K abi)? type* E type
+ // abi -> C | undisambiguated-identifier
+ //
+ // We follow the C++ demangler in suppressing details of function
+ // signatures. Every function type is rendered "fn...".
+ fn_type:
+ if (!Emit("fn...")) return false;
+ ++silence_depth_;
+ if (!ParseOptionalBinder()) return false;
+ (void)Eat('U');
+ if (Eat('K')) {
+ if (!Eat('C') && !ParseUndisambiguatedIdentifier()) return false;
+ }
+ while (!Eat('E')) {
+ ABSL_DEMANGLER_RECURSE(type, kContinueParameterList);
+ }
+ ABSL_DEMANGLER_RECURSE(type, kFinishFn);
+ --silence_depth_;
+ continue;
+
+ // dyn-trait-type -> D dyn-bounds lifetime (D already consumed)
+ // dyn-bounds -> binder? dyn-trait* E
+ //
+ // The grammar strangely allows an empty trait list, even though the
+ // compiler should never output one. We follow existing demanglers in
+ // rendering DEL_ as "dyn ".
+ //
+ // Because auto traits lengthen a type name considerably without
+ // providing much value to a search for related source code, it would be
+ // desirable to abbreviate
+ // dyn main::Trait + std::marker::Copy + std::marker::Send
+ // to
+ // dyn main::Trait + ...,
+ // eliding the auto traits. But it is difficult to do so correctly, in
+ // part because there is no guarantee that the mangling will list the
+ // main trait first. So we just print all the traits in their order of
+ // appearance in the mangled name.
+ dyn_trait_type:
+ if (!Emit("dyn ")) return false;
+ if (!ParseOptionalBinder()) return false;
+ if (!Eat('E')) {
+ ABSL_DEMANGLER_RECURSE(dyn_trait, kBeginAutoTraits);
+ while (!Eat('E')) {
+ if (!Emit(" + ")) return false;
+ ABSL_DEMANGLER_RECURSE(dyn_trait, kContinueAutoTraits);
+ }
+ }
+ if (!ParseRequiredLifetime()) return false;
+ continue;
+
+ // dyn-trait -> path dyn-trait-assoc-binding*
+ // dyn-trait-assoc-binding -> p undisambiguated-identifier type
+ //
+ // We render nonempty binding lists as <>, omitting their contents as
+ // for generic-args.
+ dyn_trait:
+ ABSL_DEMANGLER_RECURSE(path, kContinueDynTrait);
+ if (Peek() == 'p') {
+ if (!Emit("<>")) return false;
+ ++silence_depth_;
+ while (Eat('p')) {
+ if (!ParseUndisambiguatedIdentifier()) return false;
+ ABSL_DEMANGLER_RECURSE(type, kContinueAssocBinding);
+ }
+ --silence_depth_;
+ }
+ continue;
+
+ // const -> type const-data | p | backref
+ //
+ // const is a C++ keyword, so we use the label `constant` instead.
+ constant:
+ if (Eat('B')) goto const_backref;
+ if (Eat('p')) {
+ if (!Emit("_")) return false;
+ continue;
+ }
+
+ // Scan the type without printing it.
+ //
+ // The Rust language restricts the type of a const generic argument
+ // much more than the mangling grammar does. We do not enforce this.
+ //
+ // We also do not bother printing false, true, 'A', and '\u{abcd}' for
+ // the types bool and char. Because we do not print generic-args
+ // contents, we expect to print constants only in array sizes, and
+ // those should not be bool or char.
+ ++silence_depth_;
+ ABSL_DEMANGLER_RECURSE(type, kConstData);
+ --silence_depth_;
+
+ // const-data -> n? hex-digit* _
+ //
+ // Although the grammar doesn't say this, existing demanglers expect
+ // that zero is 0, not an empty digit sequence, and no nonzero value
+ // may have leading zero digits. Also n0_ is accepted and printed as
+ // -0, though a toolchain will probably never write that encoding.
+ if (Eat('n') && !EmitChar('-')) return false;
+ if (!Emit("0x")) return false;
+ if (Eat('0')) {
+ if (!EmitChar('0')) return false;
+ if (!Eat('_')) return false;
+ continue;
+ }
+ while (IsLowerHexDigit(Peek())) {
+ if (!EmitChar(Take())) return false;
+ }
+ if (!Eat('_')) return false;
+ continue;
+
+ // generic-args -> I path generic-arg* E (I already consumed)
+ //
+ // We follow the C++ demangler in omitting all the arguments from the
+ // output, printing only the list opening and closing tokens.
+ generic_args:
+ ABSL_DEMANGLER_RECURSE(path, kBeginGenericArgList);
+ if (!Emit("::<>")) return false;
+ ++silence_depth_;
+ while (!Eat('E')) {
+ ABSL_DEMANGLER_RECURSE(generic_arg, kContinueGenericArgList);
+ }
+ --silence_depth_;
+ continue;
+
+ // generic-arg -> lifetime | type | K const
+ generic_arg:
+ if (Peek() == 'L') {
+ if (!ParseOptionalLifetime()) return false;
+ continue;
+ }
+ if (Eat('K')) goto constant;
+ goto type;
+
+ // backref -> B base-62-number (B already consumed)
+ //
+ // The BeginBackref call parses and range-checks the base-62-number. We
+ // always do that much.
+ //
+ // The recursive call parses and prints what the backref points at. We
+ // save CPU and stack by skipping this work if the output would be
+ // suppressed anyway.
+ path_backref:
+ if (!BeginBackref()) return false;
+ if (silence_depth_ == 0) {
+ ABSL_DEMANGLER_RECURSE(path, kPathBackrefEnding);
+ }
+ EndBackref();
+ continue;
+
+ // This represents the same backref production as in path_backref but
+ // parses the target as a type instead of a path.
+ type_backref:
+ if (!BeginBackref()) return false;
+ if (silence_depth_ == 0) {
+ ABSL_DEMANGLER_RECURSE(type, kTypeBackrefEnding);
+ }
+ EndBackref();
+ continue;
+
+ const_backref:
+ if (!BeginBackref()) return false;
+ if (silence_depth_ == 0) {
+ ABSL_DEMANGLER_RECURSE(constant, kConstantBackrefEnding);
+ }
+ EndBackref();
+ continue;
+ }
+ }
+
+ return false; // hit iteration limit or a bug in our stack handling
+ }
+
+ private:
+ // Enumerates resumption points for ABSL_DEMANGLER_RECURSE calls.
+ enum ReturnAddress : uint8_t {
+ kInstantiatingCrate,
+ kVendorSpecificSuffix,
+ kIdentifierInUppercaseNamespace,
+ kIdentifierInLowercaseNamespace,
+ kInherentImplType,
+ kInherentImplEnding,
+ kTraitImplType,
+ kTraitImplInfix,
+ kTraitImplEnding,
+ kImplPathEnding,
+ kTraitDefinitionInfix,
+ kTraitDefinitionEnding,
+ kArraySize,
+ kFinishArray,
+ kSliceEnding,
+ kAfterFirstTupleElement,
+ kAfterSecondTupleElement,
+ kAfterThirdTupleElement,
+ kAfterSubsequentTupleElement,
+ kContinueParameterList,
+ kFinishFn,
+ kBeginAutoTraits,
+ kContinueAutoTraits,
+ kContinueDynTrait,
+ kContinueAssocBinding,
+ kConstData,
+ kBeginGenericArgList,
+ kContinueGenericArgList,
+ kPathBackrefEnding,
+ kTypeBackrefEnding,
+ kConstantBackrefEnding,
+ };
+
+ // Element counts for the stack arrays. Larger stack sizes accommodate more
+ // deeply nested names at the cost of a larger footprint on the C++ call
+ // stack.
+ enum {
+ // Maximum recursive calls outstanding at one time.
+ kStackSize = 256,
+
+ // Maximum N<uppercase> nested-paths open at once. We do not expect
+ // closures inside closures inside closures as much as functions inside
+ // modules inside other modules, so we can use a smaller array here.
+ kNamespaceStackSize = 64,
+
+ // Maximum number of nested backrefs. We can keep this stack pretty small
+ // because we do not follow backrefs inside generic-args or other contexts
+ // that suppress printing, so deep stacking is unlikely in practice.
+ kPositionStackSize = 16,
+ };
+
+ // Returns the next input character without consuming it.
+ char Peek() const { return encoding_[pos_]; }
+
+ // Consumes and returns the next input character.
+ char Take() { return encoding_[pos_++]; }
+
+ // If the next input character is the given character, consumes it and returns
+ // true; otherwise returns false without consuming a character.
+ ABSL_MUST_USE_RESULT bool Eat(char want) {
+ if (encoding_[pos_] != want) return false;
+ ++pos_;
+ return true;
+ }
+
+ // Provided there is enough remaining output space, appends c to the output,
+ // writing a fresh NUL terminator afterward, and returns true. Returns false
+ // if the output buffer had less than two bytes free.
+ ABSL_MUST_USE_RESULT bool EmitChar(char c) {
+ if (silence_depth_ > 0) return true;
+ if (out_end_ - out_ < 2) return false;
+ *out_++ = c;
+ *out_ = '\0';
+ return true;
+ }
+
+ // Provided there is enough remaining output space, appends the C string token
+ // to the output, followed by a NUL character, and returns true. Returns
+ // false if not everything fit into the output buffer.
+ ABSL_MUST_USE_RESULT bool Emit(const char* token) {
+ if (silence_depth_ > 0) return true;
+ const size_t token_length = std::strlen(token);
+ const size_t bytes_to_copy = token_length + 1; // token and final NUL
+ if (static_cast<size_t>(out_end_ - out_) < bytes_to_copy) return false;
+ std::memcpy(out_, token, bytes_to_copy);
+ out_ += token_length;
+ return true;
+ }
+
+ // Provided there is enough remaining output space, appends the decimal form
+ // of disambiguator (if it's nonnegative) or "?" (if it's negative) to the
+ // output, followed by a NUL character, and returns true. Returns false if
+ // not everything fit into the output buffer.
+ ABSL_MUST_USE_RESULT bool EmitDisambiguator(int disambiguator) {
+ if (disambiguator < 0) return EmitChar('?'); // parsed but too large
+ if (disambiguator == 0) return EmitChar('0');
+ // Convert disambiguator to decimal text. Three digits per byte is enough
+ // because 999 > 256. The bound will remain correct even if future
+ // maintenance changes the type of the disambiguator variable.
+ char digits[3 * sizeof(disambiguator)] = {};
+ size_t leading_digit_index = sizeof(digits) - 1;
+ for (; disambiguator > 0; disambiguator /= 10) {
+ digits[--leading_digit_index] =
+ static_cast<char>('0' + disambiguator % 10);
+ }
+ return Emit(digits + leading_digit_index);
+ }
+
+ // Consumes an optional disambiguator (s123_) from the input.
+ //
+ // On success returns true and fills value with the encoded value if it was
+ // not too big, otherwise with -1. If the optional disambiguator was omitted,
+ // value is 0. On parse failure returns false and sets value to -1.
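+ //
+ // For example: omitted -> 0, "s_" -> 1, "s0_" -> 2, "s1_" -> 3.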
+ ABSL_MUST_USE_RESULT bool ParseDisambiguator(int& value) {
+ value = -1;
+
+ // disambiguator = s base-62-number
+ //
+ // Disambiguators are optional. An omitted disambiguator is zero.
+ if (!Eat('s')) {
+ value = 0;
+ return true;
+ }
+ int base_62_value = 0;
+ if (!ParseBase62Number(base_62_value)) return false;
+ value = base_62_value < 0 ? -1 : base_62_value + 1;
+ return true;
+ }
+
+ // Consumes a base-62 number like _ or 123_ from the input.
+ //
+ // On success returns true and fills value with the encoded value if it was
+ // not too big, otherwise with -1. On parse failure returns false and sets
+ // value to -1.
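+ //
+ // For example: "_" -> 0, "0_" -> 1, "9_" -> 10, "a_" -> 11, "Z_" -> 62.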
+ ABSL_MUST_USE_RESULT bool ParseBase62Number(int& value) {
+ value = -1;
+
+ // base-62-number = (digit | lower | upper)* _
+ //
+ // An empty base-62 digit sequence means 0.
+ if (Eat('_')) {
+ value = 0;
+ return true;
+ }
+
+ // A nonempty digit sequence denotes its base-62 value plus 1.
+ int encoded_number = 0;
+ bool overflowed = false;
+ while (IsAlpha(Peek()) || IsDigit(Peek())) {
+ const char c = Take();
+ if (encoded_number >= std::numeric_limits<int>::max()/62) {
+ // If we are close to overflowing an int, keep parsing but stop updating
+ // encoded_number, and leave value as -1 at the end. The point is to
+ // avoid undefined behavior while parsing crate-root disambiguators,
+ // which are large in practice but not shown in demangling, while
+ // successfully computing closure and shim disambiguators, which are
+ // typically small and are printed out.
+ overflowed = true;
+ } else {
+ int digit;
+ if (IsDigit(c)) {
+ digit = c - '0';
+ } else if (IsLower(c)) {
+ digit = c - 'a' + 10;
+ } else {
+ digit = c - 'A' + 36;
+ }
+ encoded_number = 62 * encoded_number + digit;
+ }
+ }
+
+ if (!Eat('_')) return false;
+ if (!overflowed) value = encoded_number + 1;
+ return true;
+ }
+
+ // Consumes an identifier from the input, returning true on success.
+ //
+ // A nonzero uppercase_namespace specifies the character after the N in a
+ // nested-identifier, e.g., 'C' for a closure, allowing ParseIdentifier to
+ // write out the name with the conventional decoration for that namespace.
+ ABSL_MUST_USE_RESULT bool ParseIdentifier(char uppercase_namespace = '\0') {
+ // identifier -> disambiguator? undisambiguated-identifier
+ int disambiguator = 0;
+ if (!ParseDisambiguator(disambiguator)) return false;
+
+ return ParseUndisambiguatedIdentifier(uppercase_namespace, disambiguator);
+ }
+
+ // Consumes from the input an identifier with no preceding disambiguator,
+ // returning true on success.
+ //
+ // When ParseIdentifier calls this, it passes the N<namespace> character and
+ // disambiguator value so that "{closure#42}" and similar forms can be
+ // rendered correctly.
+ //
+ // At other appearances of undisambiguated-identifier in the grammar, this
+ // treatment is not applicable, and the call site omits both arguments.
+ ABSL_MUST_USE_RESULT bool ParseUndisambiguatedIdentifier(
+ char uppercase_namespace = '\0', int disambiguator = 0) {
+ // undisambiguated-identifier -> u? decimal-number _? bytes
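+ // For example, "7my_func" is the 7-byte identifier "my_func"; a leading
+ // "u" marks the bytes as Punycode to be decoded below.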
+ const bool is_punycoded = Eat('u');
+ if (!IsDigit(Peek())) return false;
+ int num_bytes = 0;
+ if (!ParseDecimalNumber(num_bytes)) return false;
+ (void)Eat('_'); // optional separator, needed if a digit follows
+ if (is_punycoded) {
+ DecodeRustPunycodeOptions options;
+ options.punycode_begin = &encoding_[pos_];
+ options.punycode_end = &encoding_[pos_] + num_bytes;
+ options.out_begin = out_;
+ options.out_end = out_end_;
+ out_ = DecodeRustPunycode(options);
+ if (out_ == nullptr) return false;
+ pos_ += static_cast<size_t>(num_bytes);
+ }
+
+ // Emit the beginnings of braced forms like {shim:vtable#0}.
+ if (uppercase_namespace != '\0') {
+ switch (uppercase_namespace) {
+ case 'C':
+ if (!Emit("{closure")) return false;
+ break;
+ case 'S':
+ if (!Emit("{shim")) return false;
+ break;
+ default:
+ if (!EmitChar('{') || !EmitChar(uppercase_namespace)) return false;
+ break;
+ }
+ if (num_bytes > 0 && !Emit(":")) return false;
+ }
+
+ // Emit the name itself.
+ if (!is_punycoded) {
+ for (int i = 0; i < num_bytes; ++i) {
+ const char c = Take();
+ if (!IsIdentifierChar(c) &&
+ // The spec gives toolchains the choice of Punycode or raw UTF-8 for
+ // identifiers containing code points above 0x7f, so accept bytes
+ // with the high bit set.
+ (c & 0x80) == 0) {
+ return false;
+ }
+ if (!EmitChar(c)) return false;
+ }
+ }
+
+ // Emit the endings of braced forms, e.g., "#42}".
+ if (uppercase_namespace != '\0') {
+ if (!EmitChar('#')) return false;
+ if (!EmitDisambiguator(disambiguator)) return false;
+ if (!EmitChar('}')) return false;
+ }
+
+ return true;
+ }
+
+ // Consumes a decimal number like 0 or 123 from the input. On success returns
+ // true and fills value with the encoded value. If the encoded value is too
+ // large or otherwise unparsable, returns false and sets value to -1.
+ ABSL_MUST_USE_RESULT bool ParseDecimalNumber(int& value) {
+ value = -1;
+ if (!IsDigit(Peek())) return false;
+ int encoded_number = Take() - '0';
+ if (encoded_number == 0) {
+ // Decimal numbers are never encoded with extra leading zeroes.
+ value = 0;
+ return true;
+ }
+ while (IsDigit(Peek()) &&
+ // avoid overflow
+ encoded_number < std::numeric_limits<int>::max() / 10) {
+ encoded_number = 10 * encoded_number + (Take() - '0');
+ }
+ if (IsDigit(Peek())) return false; // too big
+ value = encoded_number;
+ return true;
+ }
+
+ // Consumes a binder of higher-ranked lifetimes if one is present. On success
+ // returns true and discards the encoded lifetime count. On parse failure
+ // returns false.
+ ABSL_MUST_USE_RESULT bool ParseOptionalBinder() {
+ // binder -> G base-62-number
+ if (!Eat('G')) return true;
+ int ignored_binding_count;
+ return ParseBase62Number(ignored_binding_count);
+ }
+
+ // Consumes a lifetime if one is present.
+ //
+ // On success returns true and discards the lifetime index. We do not print
+ // or even range-check lifetimes because they are a finer detail than other
+ // things we omit from output, such as the entire contents of generic-args.
+ //
+ // On parse failure returns false.
+ ABSL_MUST_USE_RESULT bool ParseOptionalLifetime() {
+ // lifetime -> L base-62-number
+ if (!Eat('L')) return true;
+ int ignored_de_bruijn_index;
+ return ParseBase62Number(ignored_de_bruijn_index);
+ }
+
+ // Consumes a lifetime just like ParseOptionalLifetime, but returns false if
+ // there is no lifetime here.
+ ABSL_MUST_USE_RESULT bool ParseRequiredLifetime() {
+ if (Peek() != 'L') return false;
+ return ParseOptionalLifetime();
+ }
+
+ // Pushes ns onto the namespace stack and returns true if the stack is not
+ // full, else returns false.
+ ABSL_MUST_USE_RESULT bool PushNamespace(char ns) {
+ if (namespace_depth_ == kNamespaceStackSize) return false;
+ namespace_stack_[namespace_depth_++] = ns;
+ return true;
+ }
+
+ // Pops the last pushed namespace. Requires that the namespace stack is not
+ // empty (namespace_depth_ > 0).
+ char PopNamespace() { return namespace_stack_[--namespace_depth_]; }
+
+ // Pushes position onto the position stack and returns true if the stack is
+ // not full, else returns false.
+ ABSL_MUST_USE_RESULT bool PushPosition(int position) {
+ if (position_depth_ == kPositionStackSize) return false;
+ position_stack_[position_depth_++] = position;
+ return true;
+ }
+
+ // Pops the last pushed input position. Requires that the position stack is
+ // not empty (position_depth_ > 0).
+ int PopPosition() { return position_stack_[--position_depth_]; }
+
+ // Consumes a base-62-number denoting a backref target, pushes the current
+ // input position on the data stack, and sets the input position to the
+ // beginning of the backref target. Returns true on success. Returns false
+ // if parsing failed, the stack is exhausted, or the backref target position
+ // is out of range.
+ ABSL_MUST_USE_RESULT bool BeginBackref() {
+ // backref = B base-62-number (B already consumed)
+ //
+ // Reject backrefs that don't parse, overflow int, or don't point backward.
+ // If the offset looks fine, adjust it to account for the _R prefix.
+ int offset = 0;
+ const int offset_of_this_backref =
+ pos_ - 2 /* _R */ - 1 /* B already consumed */;
+ if (!ParseBase62Number(offset) || offset < 0 ||
+ offset >= offset_of_this_backref) {
+ return false;
+ }
+ offset += 2;
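+ // Base-62 offsets are relative to the byte after the _R prefix, while
+ // pos_ indexes the full encoding including that prefix, hence the
+ // adjustment.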
+
+ // Save the old position to restore later.
+ if (!PushPosition(pos_)) return false;
+
+ // Move the input position to the backref target.
+ //
+ // Note that we do not check whether the new position points to the
+ // beginning of a construct matching the context in which the backref
+ // appeared. We just jump to it and see whether nested parsing succeeds.
+ // We therefore accept various wrong manglings, e.g., a type backref
+ // pointing to an 'l' character inside an identifier, which happens to mean
+ // i32 when parsed as a type mangling. This saves the complexity and RAM
+ // footprint of remembering which offsets began which kinds of
+ // substructures. Existing demanglers take similar shortcuts.
+ pos_ = offset;
+ return true;
+ }
+
+ // Cleans up after a backref production by restoring the previous input
+ // position from the data stack.
+ void EndBackref() { pos_ = PopPosition(); }
+
+ // The leftmost recursion_depth_ elements of recursion_stack_ contain the
+ // ReturnAddresses pushed by ABSL_DEMANGLER_RECURSE calls not yet completed.
+ ReturnAddress recursion_stack_[kStackSize] = {};
+ int recursion_depth_ = 0;
+
+ // The leftmost namespace_depth_ elements of namespace_stack_ contain the
+ // uppercase namespace identifiers for open nested-paths, e.g., 'C' for a
+ // closure.
+ char namespace_stack_[kNamespaceStackSize] = {};
+ int namespace_depth_ = 0;
+
+ // The leftmost position_depth_ elements of position_stack_ contain the input
+ // positions to return to after fully printing the targets of backrefs.
+ int position_stack_[kPositionStackSize] = {};
+ int position_depth_ = 0;
+
+ // Anything parsed while silence_depth_ > 0 contributes nothing to the
+ // demangled output. For constructs omitted from the demangling, such as
+ // impl-path and the contents of generic-args, we will increment
+ // silence_depth_ on the way in and decrement silence_depth_ on the way out.
+ int silence_depth_ = 0;
+
+ // Input: encoding_ points to a Rust mangled symbol, and encoding_[pos_] is
+ // the next input character to be scanned.
+ int pos_ = 0;
+ const char* encoding_ = nullptr;
+
+ // Output: *out_ is where the next output character should be written, and
+ // out_end_ points past the last byte of available space.
+ char* out_ = nullptr;
+ char* out_end_ = nullptr;
+};
+
+} // namespace
+
+bool DemangleRustSymbolEncoding(const char* mangled, char* out,
+ size_t out_size) {
+ return RustSymbolParser(mangled, out, out + out_size).Parse();
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.h
new file mode 100644
index 0000000000..94a9aecb5e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/demangle_rust.h
@@ -0,0 +1,42 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_RUST_H_
+#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_RUST_H_
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Demangle the Rust encoding `mangled`. On success, return true and write the
+// demangled symbol name to `out`. Otherwise, return false, leaving unspecified
+// contents in `out`. For example, calling DemangleRustSymbolEncoding with
+// `mangled = "_RNvC8my_crate7my_func"` will yield `my_crate::my_func` in `out`,
+// provided `out_size` is large enough for that value and its trailing NUL.
+//
+// DemangleRustSymbolEncoding is async-signal-safe and runs in bounded C++
+// call-stack space. It is suitable for symbolizing stack traces in a signal
+// handler.
+bool DemangleRustSymbolEncoding(const char* mangled, char* out,
+ size_t out_size);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_RUST_H_
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.cc
index 42dcd3cde9..2c16830983 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.cc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.cc
@@ -20,8 +20,11 @@
#ifdef ABSL_HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
#include <string.h>
+
#include <cassert>
#include <cstddef>
+#include <cstdint>
+
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
@@ -86,20 +89,14 @@ ElfMemImage::ElfMemImage(const void *base) {
Init(base);
}
-int ElfMemImage::GetNumSymbols() const {
- if (!hash_) {
- return 0;
- }
- // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
- return static_cast<int>(hash_[1]);
-}
+uint32_t ElfMemImage::GetNumSymbols() const { return num_syms_; }
-const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+const ElfW(Sym) * ElfMemImage::GetDynsym(uint32_t index) const {
ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
return dynsym_ + index;
}
-const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+const ElfW(Versym) *ElfMemImage::GetVersym(uint32_t index) const {
ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
return versym_ + index;
}
@@ -154,7 +151,7 @@ void ElfMemImage::Init(const void *base) {
dynstr_ = nullptr;
versym_ = nullptr;
verdef_ = nullptr;
- hash_ = nullptr;
+ num_syms_ = 0;
strsize_ = 0;
verdefnum_ = 0;
// Sentinel: PT_LOAD .p_vaddr can't possibly be this.
@@ -219,12 +216,17 @@ void ElfMemImage::Init(const void *base) {
base_as_char - reinterpret_cast<const char *>(link_base_);
ElfW(Dyn)* dynamic_entry = reinterpret_cast<ElfW(Dyn)*>(
static_cast<intptr_t>(dynamic_program_header->p_vaddr) + relocation);
+ uint32_t *sysv_hash = nullptr;
+ uint32_t *gnu_hash = nullptr;
for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
const auto value =
static_cast<intptr_t>(dynamic_entry->d_un.d_val) + relocation;
switch (dynamic_entry->d_tag) {
case DT_HASH:
- hash_ = reinterpret_cast<ElfW(Word) *>(value);
+ sysv_hash = reinterpret_cast<uint32_t *>(value);
+ break;
+ case DT_GNU_HASH:
+ gnu_hash = reinterpret_cast<uint32_t *>(value);
break;
case DT_SYMTAB:
dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
@@ -249,13 +251,38 @@ void ElfMemImage::Init(const void *base) {
break;
}
}
- if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+ if ((!sysv_hash && !gnu_hash) || !dynsym_ || !dynstr_ || !versym_ ||
!verdef_ || !verdefnum_ || !strsize_) {
assert(false); // invalid VDSO
// Mark this image as not present. Can not recur infinitely.
Init(nullptr);
return;
}
+ if (sysv_hash) {
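+ // DT_HASH layout: [0] is nbucket and [1] is nchain, which equals the
+ // number of symbol table entries.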
+ num_syms_ = sysv_hash[1];
+ } else {
+ assert(gnu_hash);
+ // Compute the number of symbols for DT_GNU_HASH, which is specified by
+ // https://sourceware.org/gnu-gabi/program-loading-and-dynamic-linking.txt
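+ // Header layout: gnu_hash[0] is nbuckets, gnu_hash[1] is the index of the
+ // first hashed symbol (symoffset), gnu_hash[2] is the bloom filter word
+ // count, and gnu_hash[3] is the bloom shift.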
+ uint32_t nbuckets = gnu_hash[0];
+ // The buckets array is located after the header (4 uint32) and the bloom
+ // filter (size_t array of gnu_hash[2] elements).
+ uint32_t *buckets = gnu_hash + 4 + sizeof(size_t) / 4 * gnu_hash[2];
+ // Find the chain of the last non-empty bucket.
+ uint32_t idx = 0;
+ for (uint32_t i = nbuckets; i > 0;) {
+ idx = buckets[--i];
+ if (idx != 0) break;
+ }
+ if (idx != 0) {
+ // Find the last element of the chain, which has an odd value.
+ // Add one to get the number of symbols.
+ uint32_t *chain = buckets + nbuckets - gnu_hash[1];
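+ // chain[] parallels the symbol table starting at symoffset (gnu_hash[1]),
+ // so chain[idx] is the entry for symbol index idx.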
+ while (chain[idx++] % 2 == 0) {
+ }
+ }
+ num_syms_ = idx;
+ }
}
bool ElfMemImage::LookupSymbol(const char *name,
@@ -300,9 +327,9 @@ bool ElfMemImage::LookupSymbolByAddress(const void *address,
return false;
}
-ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
- : index_(index), image_(image) {
-}
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image,
+ uint32_t index)
+ : index_(index), image_(image) {}
const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
return &info_;
@@ -335,7 +362,7 @@ ElfMemImage::SymbolIterator ElfMemImage::end() const {
return SymbolIterator(this, GetNumSymbols());
}
-void ElfMemImage::SymbolIterator::Update(int increment) {
+void ElfMemImage::SymbolIterator::Update(uint32_t increment) {
const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
if (!image->IsPresent()) {
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.h
index e7fe6ab06e..19c4952e2f 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.h
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/elf_mem_image.h
@@ -22,6 +22,7 @@
// Including this will define the __GLIBC__ macro if glibc is being
// used.
#include <climits>
+#include <cstdint>
#include "absl/base/config.h"
@@ -82,10 +83,10 @@ class ElfMemImage {
bool operator!=(const SymbolIterator &rhs) const;
bool operator==(const SymbolIterator &rhs) const;
private:
- SymbolIterator(const void *const image, int index);
- void Update(int incr);
+ SymbolIterator(const void *const image, uint32_t index);
+ void Update(uint32_t incr);
SymbolInfo info_;
- int index_;
+ uint32_t index_;
const void *const image_;
};
@@ -94,14 +95,14 @@ class ElfMemImage {
void Init(const void *base);
bool IsPresent() const { return ehdr_ != nullptr; }
const ElfW(Phdr)* GetPhdr(int index) const;
- const ElfW(Sym)* GetDynsym(int index) const;
- const ElfW(Versym)* GetVersym(int index) const;
+ const ElfW(Sym) * GetDynsym(uint32_t index) const;
+ const ElfW(Versym)* GetVersym(uint32_t index) const;
const ElfW(Verdef)* GetVerdef(int index) const;
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) *sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
- int GetNumSymbols() const;
+ uint32_t GetNumSymbols() const;
SymbolIterator begin() const;
SymbolIterator end() const;
@@ -124,8 +125,8 @@ class ElfMemImage {
const ElfW(Sym) *dynsym_;
const ElfW(Versym) *versym_;
const ElfW(Verdef) *verdef_;
- const ElfW(Word) *hash_;
const char *dynstr_;
+ uint32_t num_syms_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
index 1caf7bbe35..b123479b1a 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -89,6 +89,8 @@ struct StackInfo {
static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
+ if (stack_info->sig_stack_high == kUnknownStackEnd)
+ return false;
return (comparable_ptr >= stack_info->sig_stack_low &&
comparable_ptr < stack_info->sig_stack_high);
}
@@ -122,13 +124,6 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
if (pre_signal_frame_pointer >= old_frame_pointer) {
new_frame_pointer = pre_signal_frame_pointer;
}
- // Check that alleged frame pointer is actually readable. This is to
- // prevent "double fault" in case we hit the first fault due to e.g.
- // stack corruption.
- if (!absl::debugging_internal::AddressIsReadable(
- new_frame_pointer))
- return nullptr;
- }
}
#endif
@@ -136,6 +131,14 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
return nullptr;
+ // Check that alleged frame pointer is actually readable. This is to
+ // prevent "double fault" in case we hit the first fault due to e.g.
+ // stack corruption.
+ if (!absl::debugging_internal::AddressIsReadable(
+ new_frame_pointer))
+ return nullptr;
+ }
+
// Only check the size if both frames are in the same stack.
if (InsideSignalStack(new_frame_pointer, stack_info) ==
InsideSignalStack(old_frame_pointer, stack_info)) {
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.cc b/contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.cc
new file mode 100644
index 0000000000..658a3b513b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.cc
@@ -0,0 +1,70 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/utf8_for_code_point.h"
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+namespace {
+
+// UTF-8 encoding bounds.
+constexpr uint32_t kMinSurrogate = 0xd800, kMaxSurrogate = 0xdfff;
+constexpr uint32_t kMax1ByteCodePoint = 0x7f;
+constexpr uint32_t kMax2ByteCodePoint = 0x7ff;
+constexpr uint32_t kMax3ByteCodePoint = 0xffff;
+constexpr uint32_t kMaxCodePoint = 0x10ffff;
+
+} // namespace
+
+Utf8ForCodePoint::Utf8ForCodePoint(uint64_t code_point) {
+ if (code_point <= kMax1ByteCodePoint) {
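+ // 1-byte form: 0xxxxxxx (ASCII).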
+ length = 1;
+ bytes[0] = static_cast<char>(code_point);
+ return;
+ }
+
+ if (code_point <= kMax2ByteCodePoint) {
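+ // 2-byte form: 110xxxxx 10xxxxxx.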
+ length = 2;
+ bytes[0] = static_cast<char>(0xc0 | (code_point >> 6));
+ bytes[1] = static_cast<char>(0x80 | (code_point & 0x3f));
+ return;
+ }
+
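+ // Surrogate code points are not Unicode scalar values; leave length == 0
+ // so ok() reports failure.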
+ if (kMinSurrogate <= code_point && code_point <= kMaxSurrogate) return;
+
+ if (code_point <= kMax3ByteCodePoint) {
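+ // 3-byte form: 1110xxxx 10xxxxxx 10xxxxxx.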
+ length = 3;
+ bytes[0] = static_cast<char>(0xe0 | (code_point >> 12));
+ bytes[1] = static_cast<char>(0x80 | ((code_point >> 6) & 0x3f));
+ bytes[2] = static_cast<char>(0x80 | (code_point & 0x3f));
+ return;
+ }
+
+ if (code_point > kMaxCodePoint) return;
+
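+ // 4-byte form: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.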
+ length = 4;
+ bytes[0] = static_cast<char>(0xf0 | (code_point >> 18));
+ bytes[1] = static_cast<char>(0x80 | ((code_point >> 12) & 0x3f));
+ bytes[2] = static_cast<char>(0x80 | ((code_point >> 6) & 0x3f));
+ bytes[3] = static_cast<char>(0x80 | (code_point & 0x3f));
+}
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.h b/contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.h
new file mode 100644
index 0000000000..f23cde6d95
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/debugging/internal/utf8_for_code_point.h
@@ -0,0 +1,47 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_UTF8_FOR_CODE_POINT_H_
+#define ABSL_DEBUGGING_INTERNAL_UTF8_FOR_CODE_POINT_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+struct Utf8ForCodePoint {
+ // Converts a Unicode code point to the corresponding UTF-8 byte sequence.
+ // Async-signal-safe to support use in symbolizing stack traces from a signal
+ // handler.
+ explicit Utf8ForCodePoint(uint64_t code_point);
+
+ // Returns true if the constructor's code_point argument was valid.
+ bool ok() const { return length != 0; }
+
+ // If code_point was in range, then 1 <= length <= 4, and the UTF-8 encoding
+ // is found in bytes[0 .. (length - 1)]. If code_point was invalid, then
+ // length == 0. In either case, the contents of bytes[length .. 3] are
+ // unspecified.
+ char bytes[4] = {};
+ uint32_t length = 0;
+};
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_UTF8_FOR_CODE_POINT_H_
diff --git a/contrib/restricted/abseil-cpp/absl/debugging/ya.make b/contrib/restricted/abseil-cpp/absl/debugging/ya.make
index cc02fbcb4f..4440433c50 100644
--- a/contrib/restricted/abseil-cpp/absl/debugging/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/debugging/ya.make
@@ -22,9 +22,12 @@ NO_UTIL()
SRCS(
failure_signal_handler.cc
internal/address_is_readable.cc
+ internal/decode_rust_punycode.cc
internal/demangle.cc
+ internal/demangle_rust.cc
internal/elf_mem_image.cc
internal/examine_stack.cc
+ internal/utf8_for_code_point.cc
internal/vdso_support.cc
leak_check.cc
stacktrace.cc
diff --git a/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h b/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h
index c30aa6094f..26ec0e7d84 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/commandlineflag.h
@@ -59,6 +59,14 @@ class PrivateHandleAccessor;
// // Now you can get flag info from that reflection handle.
// std::string flag_location = my_flag_data->Filename();
// ...
+
+// These are only used as constexpr global objects.
+// They do not use a virtual destructor to simplify their implementation.
+// They are not destroyed except at program exit, so leaks do not matter.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
class CommandLineFlag {
public:
constexpr CommandLineFlag() = default;
@@ -193,6 +201,9 @@ class CommandLineFlag {
// flag's value type.
virtual void CheckDefaultValueParsingRoundtrip() const = 0;
};
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/flags/flag.h b/contrib/restricted/abseil-cpp/absl/flags/flag.h
index 06ea6932f9..a8e0e93299 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/flag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/flag.h
@@ -29,12 +29,14 @@
#ifndef ABSL_FLAGS_FLAG_H_
#define ABSL_FLAGS_FLAG_H_
+#include <cstdint>
#include <string>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
+#include "absl/flags/commandlineflag.h"
#include "absl/flags/config.h"
#include "absl/flags/internal/flag.h"
#include "absl/flags/internal/registry.h"
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
index 65d0e58f84..981f19fdb8 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.cc
@@ -22,14 +22,17 @@
#include <array>
#include <atomic>
+#include <cstring>
#include <memory>
-#include <new>
#include <string>
#include <typeinfo>
+#include <vector>
+#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/casts.h"
#include "absl/base/config.h"
+#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/flags/config.h"
@@ -44,10 +47,9 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-// The help message indicating that the commandline flag has been
-// 'stripped'. It will not show up when doing "-help" and its
-// variants. The flag is stripped if ABSL_FLAGS_STRIP_HELP is set to 1
-// before including absl/flags/flag.h
+// The help message indicating that the commandline flag has been stripped. It
+// will not show up when doing "-help" and its variants. The flag is stripped
+// if ABSL_FLAGS_STRIP_HELP is set to 1 before including absl/flags/flag.h
const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001";
namespace {
@@ -78,9 +80,32 @@ class MutexRelock {
absl::Mutex& mu_;
};
+// This is a freelist of leaked flag values and a guard for its access.
+// When we can't guarantee it is safe to reuse the memory for flag values,
+// we move the memory to the freelist where it lives indefinitely, so it can
+// still be safely accessed. This also prevents leak checkers from complaining
+// about the leaked memory that can no longer be accessed through any pointer.
+ABSL_CONST_INIT absl::Mutex s_freelist_guard(absl::kConstInit);
+ABSL_CONST_INIT std::vector<void*>* s_freelist = nullptr;
+
+void AddToFreelist(void* p) {
+ absl::MutexLock l(&s_freelist_guard);
+ if (!s_freelist) {
+ s_freelist = new std::vector<void*>;
+ }
+ s_freelist->push_back(p);
+}
+
} // namespace
///////////////////////////////////////////////////////////////////////////////
+
+uint64_t NumLeakedFlagValues() {
+ absl::MutexLock l(&s_freelist_guard);
+ return s_freelist == nullptr ? 0u : s_freelist->size();
+}
+
+///////////////////////////////////////////////////////////////////////////////
// Persistent state of the flag data.
class FlagImpl;
@@ -97,7 +122,7 @@ class FlagState : public flags_internal::FlagStateInterface {
counter_(counter) {}
~FlagState() override {
- if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer &&
+ if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kHeapAllocated &&
flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked)
return;
flags_internal::Delete(flag_impl_.op_, value_.heap_allocated);
@@ -140,6 +165,33 @@ void DynValueDeleter::operator()(void* ptr) const {
Delete(op, ptr);
}
+MaskedPointer::MaskedPointer(ptr_t rhs, bool is_candidate) : ptr_(rhs) {
+ if (is_candidate) {
+ ApplyMask(kUnprotectedReadCandidate);
+ }
+}
+
+bool MaskedPointer::IsUnprotectedReadCandidate() const {
+ return CheckMask(kUnprotectedReadCandidate);
+}
+
+bool MaskedPointer::HasBeenRead() const { return CheckMask(kHasBeenRead); }
+
+void MaskedPointer::Set(FlagOpFn op, const void* src, bool is_candidate) {
+ flags_internal::Copy(op, src, Ptr());
+ if (is_candidate) {
+ ApplyMask(kUnprotectedReadCandidate);
+ }
+}
+void MaskedPointer::MarkAsRead() { ApplyMask(kHasBeenRead); }
+
+void MaskedPointer::ApplyMask(mask_t mask) {
+ ptr_ = reinterpret_cast<ptr_t>(reinterpret_cast<mask_t>(ptr_) | mask);
+}
+bool MaskedPointer::CheckMask(mask_t mask) const {
+ return (reinterpret_cast<mask_t>(ptr_) & mask) != 0;
+}
+
void FlagImpl::Init() {
new (&data_guard_) absl::Mutex;
@@ -174,11 +226,16 @@ void FlagImpl::Init() {
(*default_value_.gen_func)(AtomicBufferValue());
break;
}
- case FlagValueStorageKind::kAlignedBuffer:
+ case FlagValueStorageKind::kHeapAllocated:
// For this storage kind the default_value_ always points to gen_func
// during initialization.
assert(def_kind == FlagDefaultKind::kGenFunc);
- (*default_value_.gen_func)(AlignedBufferValue());
+ // Flag value initially points to the internal buffer.
+ MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+ (*default_value_.gen_func)(ptr_value.Ptr());
+ // Default value is a candidate for an unprotected read.
+ PtrStorage().store(MaskedPointer(ptr_value.Ptr(), true),
+ std::memory_order_release);
break;
}
seq_lock_.MarkInitialized();
@@ -234,7 +291,7 @@ std::unique_ptr<void, DynValueDeleter> FlagImpl::MakeInitValue() const {
return {res, DynValueDeleter{op_}};
}
-void FlagImpl::StoreValue(const void* src) {
+void FlagImpl::StoreValue(const void* src, ValueSource source) {
switch (ValueStorageKind()) {
case FlagValueStorageKind::kValueAndInitBit:
case FlagValueStorageKind::kOneWordAtomic: {
@@ -249,8 +306,27 @@ void FlagImpl::StoreValue(const void* src) {
seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_));
break;
}
- case FlagValueStorageKind::kAlignedBuffer:
- Copy(op_, src, AlignedBufferValue());
+ case FlagValueStorageKind::kHeapAllocated:
+ MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+
+ if (ptr_value.IsUnprotectedReadCandidate() && ptr_value.HasBeenRead()) {
+ // If the current value is a candidate for an unprotected read and was
+ // already read at least once, follow-up reads (if any) are done without
+ // mutex protection. We can't guarantee it is safe to reuse this memory
+ // since it may have been accessed by another thread concurrently, so
+ // instead we move the memory to a freelist so it can still be safely
+ // accessed, and allocate a new one for the new value.
+ AddToFreelist(ptr_value.Ptr());
+ ptr_value = MaskedPointer(Clone(op_, src), source == kCommandLine);
+ } else {
+ // The current value was either set programmatically or never read.
+ // We can reuse the memory since all accesses to this value (if any)
+ // were protected by the mutex. That said, if the new value comes from
+ // the command line, it now becomes a candidate for an unprotected read.
+ ptr_value.Set(op_, src, source == kCommandLine);
+ }
+
+ PtrStorage().store(ptr_value, std::memory_order_release);
seq_lock_.IncrementModificationCount();
break;
}
@@ -305,9 +381,10 @@ std::string FlagImpl::CurrentValue() const {
ReadSequenceLockedData(cloned.get());
return flags_internal::Unparse(op_, cloned.get());
}
- case FlagValueStorageKind::kAlignedBuffer: {
+ case FlagValueStorageKind::kHeapAllocated: {
absl::MutexLock l(guard);
- return flags_internal::Unparse(op_, AlignedBufferValue());
+ return flags_internal::Unparse(
+ op_, PtrStorage().load(std::memory_order_acquire).Ptr());
}
}
@@ -370,10 +447,12 @@ std::unique_ptr<FlagStateInterface> FlagImpl::SaveState() {
return absl::make_unique<FlagState>(*this, cloned, modified,
on_command_line, ModificationCount());
}
- case FlagValueStorageKind::kAlignedBuffer: {
+ case FlagValueStorageKind::kHeapAllocated: {
return absl::make_unique<FlagState>(
- *this, flags_internal::Clone(op_, AlignedBufferValue()), modified,
- on_command_line, ModificationCount());
+ *this,
+ flags_internal::Clone(
+ op_, PtrStorage().load(std::memory_order_acquire).Ptr()),
+ modified, on_command_line, ModificationCount());
}
}
return nullptr;
@@ -388,11 +467,11 @@ bool FlagImpl::RestoreState(const FlagState& flag_state) {
switch (ValueStorageKind()) {
case FlagValueStorageKind::kValueAndInitBit:
case FlagValueStorageKind::kOneWordAtomic:
- StoreValue(&flag_state.value_.one_word);
+ StoreValue(&flag_state.value_.one_word, kProgrammaticChange);
break;
case FlagValueStorageKind::kSequenceLocked:
- case FlagValueStorageKind::kAlignedBuffer:
- StoreValue(flag_state.value_.heap_allocated);
+ case FlagValueStorageKind::kHeapAllocated:
+ StoreValue(flag_state.value_.heap_allocated, kProgrammaticChange);
break;
}
@@ -411,11 +490,6 @@ StorageT* FlagImpl::OffsetValue() const {
return reinterpret_cast<StorageT*>(p + offset);
}
-void* FlagImpl::AlignedBufferValue() const {
- assert(ValueStorageKind() == FlagValueStorageKind::kAlignedBuffer);
- return OffsetValue<void>();
-}
-
std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const {
assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked);
return OffsetValue<std::atomic<uint64_t>>();
@@ -427,6 +501,11 @@ std::atomic<int64_t>& FlagImpl::OneWordValue() const {
return OffsetValue<FlagOneWordValue>()->value;
}
+std::atomic<MaskedPointer>& FlagImpl::PtrStorage() const {
+ assert(ValueStorageKind() == FlagValueStorageKind::kHeapAllocated);
+ return OffsetValue<FlagMaskedPointerValue>()->value;
+}
+
// Attempts to parse supplied `value` string using parsing routine in the `flag`
// argument. If parsing successful, this function replaces the dst with newly
// parsed value. In case if any error is encountered in either step, the error
@@ -460,9 +539,17 @@ void FlagImpl::Read(void* dst) const {
ReadSequenceLockedData(dst);
break;
}
- case FlagValueStorageKind::kAlignedBuffer: {
+ case FlagValueStorageKind::kHeapAllocated: {
absl::MutexLock l(guard);
- flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst);
+ MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+
+ flags_internal::CopyConstruct(op_, ptr_value.Ptr(), dst);
+
+ // For unprotected read candidates, mark that the value has been read.
+ if (ptr_value.IsUnprotectedReadCandidate() && !ptr_value.HasBeenRead()) {
+ ptr_value.MarkAsRead();
+ PtrStorage().store(ptr_value, std::memory_order_release);
+ }
break;
}
}
@@ -513,7 +600,7 @@ void FlagImpl::Write(const void* src) {
}
}
- StoreValue(src);
+ StoreValue(src, kProgrammaticChange);
}
// Sets the value of the flag based on specified string `value`. If the flag
@@ -534,7 +621,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode,
auto tentative_value = TryParse(value, err);
if (!tentative_value) return false;
- StoreValue(tentative_value.get());
+ StoreValue(tentative_value.get(), source);
if (source == kCommandLine) {
on_command_line_ = true;
@@ -555,7 +642,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode,
auto tentative_value = TryParse(value, err);
if (!tentative_value) return false;
- StoreValue(tentative_value.get());
+ StoreValue(tentative_value.get(), source);
break;
}
case SET_FLAGS_DEFAULT: {
@@ -573,7 +660,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode,
if (!modified_) {
// Need to set both default value *and* current, in this case.
- StoreValue(default_value_.dynamic_value);
+ StoreValue(default_value_.dynamic_value, source);
modified_ = false;
}
break;
diff --git a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
index 2e6e6b8742..a0be31d93a 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
+++ b/contrib/restricted/abseil-cpp/absl/flags/internal/flag.h
@@ -22,7 +22,6 @@
#include <atomic>
#include <cstring>
#include <memory>
-#include <new>
#include <string>
#include <type_traits>
#include <typeinfo>
@@ -296,11 +295,8 @@ constexpr FlagDefaultArg DefaultArg(char) {
}
///////////////////////////////////////////////////////////////////////////////
-// Flag current value auxiliary structs.
-
-constexpr int64_t UninitializedFlagValue() {
- return static_cast<int64_t>(0xababababababababll);
-}
+// Flag storage selector traits. Each trait indicates which storage kind to
+// use for the flag value.
template <typename T>
using FlagUseValueAndInitBitStorage =
@@ -322,9 +318,11 @@ enum class FlagValueStorageKind : uint8_t {
kValueAndInitBit = 0,
kOneWordAtomic = 1,
kSequenceLocked = 2,
- kAlignedBuffer = 3,
+ kHeapAllocated = 3,
};
+// This constexpr function returns the storage kind for the given flag value
+// type.
template <typename T>
static constexpr FlagValueStorageKind StorageKind() {
return FlagUseValueAndInitBitStorage<T>::value
@@ -333,14 +331,24 @@ static constexpr FlagValueStorageKind StorageKind() {
? FlagValueStorageKind::kOneWordAtomic
: FlagUseSequenceLockStorage<T>::value
? FlagValueStorageKind::kSequenceLocked
- : FlagValueStorageKind::kAlignedBuffer;
+ : FlagValueStorageKind::kHeapAllocated;
}
+// This is a base class for the storage classes used by kOneWordAtomic and
+// kValueAndInitBit storage kinds. It simply stores the one-word value as an
+// atomic. By default, it is initialized to a magic value that is unlikely to
+// be a valid value for the flag value type.
struct FlagOneWordValue {
+ constexpr static int64_t Uninitialized() {
+ return static_cast<int64_t>(0xababababababababll);
+ }
+
+ constexpr FlagOneWordValue() : value(Uninitialized()) {}
constexpr explicit FlagOneWordValue(int64_t v) : value(v) {}
std::atomic<int64_t> value;
};
+// This class represents the memory layout used by the kValueAndInitBit
+// storage kind.
template <typename T>
struct alignas(8) FlagValueAndInitBit {
T value;
@@ -349,16 +357,91 @@ struct alignas(8) FlagValueAndInitBit {
uint8_t init;
};
+// This class implements an aligned pointer with two options stored via masks
+// in unused low bits of the pointer value (available due to its alignment).
+// - IsUnprotectedReadCandidate - indicates that the value can be switched to
+// unprotected read without a lock.
+// - HasBeenRead - indicates that the value has been read at least once.
+// - AllowsUnprotectedRead - combination of the two options above and indicates
+// that the value can now be read without a lock.
+// Further details of these options and their use are covered in the
+// description
+// of the FlagValue<T, FlagValueStorageKind::kHeapAllocated> specialization.
+class MaskedPointer {
+ public:
+ using mask_t = uintptr_t;
+ using ptr_t = void*;
+
+ static constexpr int RequiredAlignment() { return 4; }
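+ // 4-byte alignment keeps the two low pointer bits free for the masks
+ // defined below.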
+
+ constexpr explicit MaskedPointer(ptr_t rhs) : ptr_(rhs) {}
+ MaskedPointer(ptr_t rhs, bool is_candidate);
+
+ void* Ptr() const {
+ return reinterpret_cast<void*>(reinterpret_cast<mask_t>(ptr_) &
+ kPtrValueMask);
+ }
+ bool AllowsUnprotectedRead() const {
+ return (reinterpret_cast<mask_t>(ptr_) & kAllowsUnprotectedRead) ==
+ kAllowsUnprotectedRead;
+ }
+ bool IsUnprotectedReadCandidate() const;
+ bool HasBeenRead() const;
+
+ void Set(FlagOpFn op, const void* src, bool is_candidate);
+ void MarkAsRead();
+
+ private:
+ // Masks
+ // Indicates that the flag value is either the default or originated from
+ // the command line.
+ static constexpr mask_t kUnprotectedReadCandidate = 0x1u;
+ // Indicates that the flag has been read.
+ static constexpr mask_t kHasBeenRead = 0x2u;
+ static constexpr mask_t kAllowsUnprotectedRead =
+ kUnprotectedReadCandidate | kHasBeenRead;
+ static constexpr mask_t kPtrValueMask = ~kAllowsUnprotectedRead;
+
+ void ApplyMask(mask_t mask);
+ bool CheckMask(mask_t mask) const;
+
+ ptr_t ptr_;
+};
+
+// This class implements type-erased storage of the heap-allocated flag value.
+// It is used as a base class for the storage class of the kHeapAllocated
+// storage kind. The initial_buffer is expected to have an alignment of at
+// least MaskedPointer::RequiredAlignment(), so that the bits used by the
+// MaskedPointer to store masks are set to 0. This guarantees that the value
+// starts in an uninitialized state.
+struct FlagMaskedPointerValue {
+ constexpr explicit FlagMaskedPointerValue(MaskedPointer::ptr_t initial_buffer)
+ : value(MaskedPointer(initial_buffer)) {}
+
+ std::atomic<MaskedPointer> value;
+};
+
+// This is the forward declaration of the template that represents storage for
+// the flag values. This template is expected to be explicitly specialized for
+// each storage kind, and it does not have a generic default implementation.
template <typename T,
FlagValueStorageKind Kind = flags_internal::StorageKind<T>()>
struct FlagValue;
+// This specialization represents the storage of flag value types with the
+// kValueAndInitBit storage kind. It is based on the FlagOneWordValue class
+// and relies on the memory layout in FlagValueAndInitBit<T> to indicate
+// whether the value has been initialized.
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
constexpr FlagValue() : FlagOneWordValue(0) {}
bool Get(const SequenceLock&, T& dst) const {
int64_t storage = value.load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(storage == 0)) {
+ // This static_assert ensures that the initialization inside
+ // FlagImpl::Init is able to set the init member correctly.
+ static_assert(offsetof(FlagValueAndInitBit<T>, init) == sizeof(T),
+ "Unexpected memory layout of FlagValueAndInitBit");
return false;
}
dst = absl::bit_cast<FlagValueAndInitBit<T>>(storage).value;
@@ -366,12 +449,16 @@ struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
}
};
+// This specialization represents the storage of flag value types with the
+// kOneWordAtomic storage kind. It is based on the FlagOneWordValue class and
+// relies on the magic uninitialized state of a default-constructed
+// FlagOneWordValue to indicate whether the value has been initialized.
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
- constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {}
+ constexpr FlagValue() : FlagOneWordValue() {}
bool Get(const SequenceLock&, T& dst) const {
int64_t one_word_val = value.load(std::memory_order_acquire);
- if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) {
+ if (ABSL_PREDICT_FALSE(one_word_val == FlagOneWordValue::Uninitialized())) {
return false;
}
std::memcpy(&dst, static_cast<const void*>(&one_word_val), sizeof(T));
@@ -379,6 +466,12 @@ struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
}
};
+// This specialization represents the storage of flag value types with the
+// kSequenceLocked storage kind. This storage is used by trivially copyable
+// types with size greater than 8 bytes. It relies on the uninitialized state
+// of the SequenceLock to indicate whether the value has been initialized, and
+// it provides lock-free read access to the underlying value once it is
+// initialized.
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
bool Get(const SequenceLock& lock, T& dst) const {
@@ -392,11 +485,62 @@ struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords];
};
+// This specialization represents the storage of flag value types with the
+// kHeapAllocated storage kind. This is the storage of last resort, used when
+// none of the other storage kinds are applicable.
+//
+// Generally speaking, values with this storage kind can't be accessed
+// atomically and thus can't be read without holding a lock. If we ever wanted
+// to avoid the lock, we'd need to leak the old value every time a new flag
+// value is set (since we would otherwise risk a race condition).
+//
+// Instead of doing that, this implementation attempts to cater to some common
+// use cases by allowing at most two values to be leaked: the default value
+// and the value set from the command line.
+//
+// This specialization provides an initial buffer for the first flag value.
+// This is where the default value is going to be stored. We attempt to reuse
+// this buffer if possible, including storing the value set from the command
+// line there.
+//
+// As long as we only read this value, we can access it without a lock (in
+// practice we still use the lock for the very first read so we can set the
+// "has been read" option on this flag).
+//
+// If the flag is specified on the command line, we store the parsed value
+// either in the internal buffer (if the default value has never been read) or
+// we leak the default value and allocate new storage for the parsed value.
+// This value is also a candidate for an unprotected read. If the flag is set
+// programmatically after the command line is parsed, the storage for this
+// value is going to be leaked. Note that in both scenarios there is no real
+// leak: we store the leaked value pointers in the internal freelist to avoid
+// triggering memory leak checker complaints.
+//
+// If the flag is ever set programmatically, it stops being a candidate for an
+// unprotected read, and any follow-up access to the flag value requires a
+// lock. Note that if the value is set programmatically before the command
+// line is parsed, we can switch back to enabling unprotected reads for that
+// value.
template <typename T>
-struct FlagValue<T, FlagValueStorageKind::kAlignedBuffer> {
- bool Get(const SequenceLock&, T&) const { return false; }
+struct FlagValue<T, FlagValueStorageKind::kHeapAllocated>
+ : FlagMaskedPointerValue {
+ // We const-initialize the value with an unmasked pointer to the internal
+ // buffer, making sure it is not a candidate for an unprotected read. This
+ // way we can ensure Init is done before any access to the flag value.
+ constexpr FlagValue() : FlagMaskedPointerValue(&buffer[0]) {}
+
+ bool Get(const SequenceLock&, T& dst) const {
+ MaskedPointer ptr_value = value.load(std::memory_order_acquire);
- alignas(T) char value[sizeof(T)];
+ if (ABSL_PREDICT_TRUE(ptr_value.AllowsUnprotectedRead())) {
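+ // A value that allows unprotected reads is never mutated or freed once
+ // published (StoreValue retires it to the freelist instead), so copying
+ // here without the lock is race-free.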
+ ::new (static_cast<void*>(&dst)) T(*static_cast<T*>(ptr_value.Ptr()));
+ return true;
+ }
+ return false;
+ }
+
+ alignas(MaskedPointer::RequiredAlignment()) alignas(
+ T) char buffer[sizeof(T)]{};
};
///////////////////////////////////////////////////////////////////////////////
@@ -425,6 +569,13 @@ struct DynValueDeleter {
class FlagState;
+// These are only used as constexpr global objects.
+// They do not use a virtual destructor to simplify their implementation.
+// They are not destroyed except at program exit, so leaks do not matter.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
class FlagImpl final : public CommandLineFlag {
public:
constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op,
@@ -477,7 +628,7 @@ class FlagImpl final : public CommandLineFlag {
// Used in read/write operations to validate source/target has correct type.
// For example if flag is declared as absl::Flag<int> FLAGS_foo, a call to
// absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed
- // int. To do that we pass the "assumed" type id (which is deduced from type
+ // int. To do that we pass the assumed type id (which is deduced from type
// int) as an argument `type_id`, which is in turn is validated against the
// type id stored in flag object by flag definition statement.
void AssertValidType(FlagFastTypeId type_id,
@@ -498,17 +649,13 @@ class FlagImpl final : public CommandLineFlag {
void Init();
// Offset value access methods. One per storage kind. These methods to not
- // respect const correctness, so be very carefull using them.
+ // respect const correctness, so be very careful using them.
// This is a shared helper routine which encapsulates most of the magic. Since
// it is only used inside the three routines below, which are defined in
// flag.cc, we can define it in that file as well.
template <typename StorageT>
StorageT* OffsetValue() const;
- // This is an accessor for a value stored in an aligned buffer storage
- // used for non-trivially-copyable data types.
- // Returns a mutable pointer to the start of a buffer.
- void* AlignedBufferValue() const;
// The same as above, but used for sequencelock-protected storage.
std::atomic<uint64_t>* AtomicBufferValue() const;
@@ -517,13 +664,16 @@ class FlagImpl final : public CommandLineFlag {
// mutable reference to an atomic value.
std::atomic<int64_t>& OneWordValue() const;
+ std::atomic<MaskedPointer>& PtrStorage() const;
+
// Attempts to parse supplied `value` string. If parsing is successful,
// returns new value. Otherwise returns nullptr.
std::unique_ptr<void, DynValueDeleter> TryParse(absl::string_view value,
std::string& err) const
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
// Stores the flag value based on the pointer to the source.
- void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+ void StoreValue(const void* src, ValueSource source)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
// Copy the flag data, protected by `seq_lock_` into `dst`.
//
@@ -579,7 +729,7 @@ class FlagImpl final : public CommandLineFlag {
const char* const name_;
// The file name where ABSL_FLAG resides.
const char* const filename_;
- // Type-specific operations "vtable".
+ // Type-specific operations vtable.
const FlagOpFn op_;
// Help message literal or function to generate it.
const FlagHelpMsg help_;
@@ -624,6 +774,9 @@ class FlagImpl final : public CommandLineFlag {
// problems.
alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)];
};
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
///////////////////////////////////////////////////////////////////////////////
// The Flag object parameterized by the flag's value type. This class implements
@@ -711,16 +864,21 @@ class FlagImplPeer {
// Implementation of Flag value specific operations routine.
template <typename T>
void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
+ struct AlignedSpace {
+ alignas(MaskedPointer::RequiredAlignment()) alignas(T) char buf[sizeof(T)];
+ };
+ using Allocator = std::allocator<AlignedSpace>;
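+ // Allocating AlignedSpace rather than T guarantees
+ // MaskedPointer::RequiredAlignment(), so the low bits of pointers stored
+ // in MaskedPointer start out zero.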
switch (op) {
case FlagOp::kAlloc: {
- std::allocator<T> alloc;
- return std::allocator_traits<std::allocator<T>>::allocate(alloc, 1);
+ Allocator alloc;
+ return std::allocator_traits<Allocator>::allocate(alloc, 1);
}
case FlagOp::kDelete: {
T* p = static_cast<T*>(v2);
p->~T();
- std::allocator<T> alloc;
- std::allocator_traits<std::allocator<T>>::deallocate(alloc, p, 1);
+ Allocator alloc;
+ std::allocator_traits<Allocator>::deallocate(
+ alloc, reinterpret_cast<AlignedSpace*>(p), 1);
return nullptr;
}
case FlagOp::kCopy:
@@ -754,8 +912,7 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
// Round sizeof(FlagImp) to a multiple of alignof(FlagValue<T>) to get the
// offset of the data.
size_t round_to = alignof(FlagValue<T>);
- size_t offset =
- (sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
+ size_t offset = (sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
return reinterpret_cast<void*>(offset);
}
}
@@ -770,7 +927,8 @@ struct FlagRegistrarEmpty {};
template <typename T, bool do_register>
class FlagRegistrar {
public:
- explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) {
+ constexpr explicit FlagRegistrar(Flag<T>& flag, const char* filename)
+ : flag_(flag) {
if (do_register)
flags_internal::RegisterCommandLineFlag(flag_.impl_, filename);
}
@@ -780,15 +938,19 @@ class FlagRegistrar {
return *this;
}
- // Make the registrar "die" gracefully as an empty struct on a line where
+ // Makes the registrar die gracefully as an empty struct on a line where
// registration happens. Registrar objects are intended to live only as
// temporary.
- operator FlagRegistrarEmpty() const { return {}; } // NOLINT
+ constexpr operator FlagRegistrarEmpty() const { return {}; } // NOLINT
private:
Flag<T>& flag_; // Flag being registered (not owned).
};
+///////////////////////////////////////////////////////////////////////////////
+// Test only API
+uint64_t NumLeakedFlagValues();
+
} // namespace flags_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/flags/reflection.cc b/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
index 841921a926..ea856ff9c1 100644
--- a/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
+++ b/contrib/restricted/abseil-cpp/absl/flags/reflection.cc
@@ -217,6 +217,13 @@ void FinalizeRegistry() {
namespace {
+// These are only used as constexpr global objects.
+// They do not use a virtual destructor to simplify their implementation.
+// They are not destroyed except at program exit, so leaks do not matter.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
class RetiredFlagObj final : public CommandLineFlag {
public:
constexpr RetiredFlagObj(const char* name, FlagFastTypeId type_id)
@@ -276,6 +283,9 @@ class RetiredFlagObj final : public CommandLineFlag {
const char* const name_;
const FlagFastTypeId type_id_;
};
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
} // namespace
diff --git a/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h b/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h
index 68d882532e..3acb9fd089 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/any_invocable.h
@@ -34,6 +34,7 @@
#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
#include <cstddef>
+#include <functional>
#include <initializer_list>
#include <type_traits>
#include <utility>
@@ -98,9 +99,9 @@ ABSL_NAMESPACE_BEGIN
// `AnyInvocable` also properly respects `const` qualifiers, reference
// qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as
// part of the user-specified function type (e.g.
-// `AnyInvocable<void()&& const noexcept>`). These qualifiers will be applied to
-// the `AnyInvocable` object's `operator()`, and the underlying invocable must
-// be compatible with those qualifiers.
+// `AnyInvocable<void() const && noexcept>`). These qualifiers will be applied
+// to the `AnyInvocable` object's `operator()`, and the underlying invocable
+// must be compatible with those qualifiers.
//
// Comparison of const and non-const function types:
//
@@ -151,6 +152,12 @@ ABSL_NAMESPACE_BEGIN
//
// Attempting to call `absl::AnyInvocable` multiple times in such a case
// results in undefined behavior.
+//
+// Invoking an empty `absl::AnyInvocable` results in undefined behavior:
+//
+// // Create an empty instance using the default constructor.
+// AnyInvocable<void()> empty;
+// empty(); // WARNING: Undefined behavior!
template <class Sig>
class AnyInvocable : private internal_any_invocable::Impl<Sig> {
private:
@@ -167,6 +174,7 @@ class AnyInvocable : private internal_any_invocable::Impl<Sig> {
// Constructors
// Constructs the `AnyInvocable` in an empty state.
+ // Invoking it results in undefined behavior.
AnyInvocable() noexcept = default;
AnyInvocable(std::nullptr_t) noexcept {} // NOLINT
@@ -277,6 +285,8 @@ class AnyInvocable : private internal_any_invocable::Impl<Sig> {
// In other words:
// std::function<void()> f; // empty
// absl::AnyInvocable<void()> a = std::move(f); // not empty
+ //
+ // Invoking an empty `AnyInvocable` results in undefined behavior.
explicit operator bool() const noexcept { return this->HasValue(); }
// Invokes the target object of `*this`. `*this` must not be empty.
diff --git a/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h b/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h
index b04436d1d6..c2d8cd4727 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h
+++ b/contrib/restricted/abseil-cpp/absl/functional/internal/any_invocable.h
@@ -19,11 +19,11 @@
////////////////////////////////////////////////////////////////////////////////
// //
-// This implementation of the proposed `any_invocable` uses an approach that //
-// chooses between local storage and remote storage for the contained target //
-// object based on the target object's size, alignment requirements, and //
-// whether or not it has a nothrow move constructor. Additional optimizations //
-// are performed when the object is a trivially copyable type [basic.types]. //
+// This implementation chooses between local storage and remote storage for //
+// the contained target object based on the target object's size, alignment //
+// requirements, and whether or not it has a nothrow move constructor. //
+// Additional optimizations are performed when the object is a trivially //
+// copyable type [basic.types]. //
// //
// There are three datamembers per `AnyInvocable` instance //
// //
@@ -39,7 +39,7 @@
// target object, directly returning the result. //
// //
// When in the logically empty state, the manager function is an empty //
-// function and the invoker function is one that would be undefined-behavior //
+// function and the invoker function is one that would be undefined behavior //
// to call. //
// //
// An additional optimization is performed when converting from one //
@@ -58,12 +58,12 @@
#include <cstring>
#include <exception>
#include <functional>
-#include <initializer_list>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
diff --git a/contrib/restricted/abseil-cpp/absl/functional/ya.make b/contrib/restricted/abseil-cpp/absl/functional/ya.make
index ac5695f072..467248d279 100644
--- a/contrib/restricted/abseil-cpp/absl/functional/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/functional/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20240116.2)
+VERSION(20240722.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240116.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240722.0.tar.gz)
NO_RUNTIME()
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
index 11451e575c..93906ef872 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.cc
@@ -61,7 +61,7 @@ constexpr uint64_t kHashSalt[5] = {
uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
size_t len) {
- return LowLevelHash(data, len, Seed(), kHashSalt);
+ return LowLevelHashLenGt16(data, len, Seed(), kHashSalt);
}
} // namespace hash_internal
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
index f4a94f9129..03bf183905 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/hash.h
@@ -24,6 +24,15 @@
#include <TargetConditionals.h>
#endif
+#include "absl/base/config.h"
+
+// For feature testing and determining which headers can be included.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+#include <version>
+#else
+#include <ciso646>
+#endif
+
#include <algorithm>
#include <array>
#include <bitset>
@@ -47,7 +56,6 @@
#include <utility>
#include <vector>
-#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
@@ -61,7 +69,7 @@
#include "absl/types/variant.h"
#include "absl/utility/utility.h"
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
!defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY)
#include <filesystem> // NOLINT
#endif
@@ -591,7 +599,9 @@ H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
!defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY) && \
(!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) || \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000)
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) && \
+ (!defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) || \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500)
#define ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE 1
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
index b5db0b8960..6dc71cf7cd 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.cc
@@ -14,6 +14,9 @@
#include "absl/hash/internal/low_level_hash.h"
+#include <cstddef>
+#include <cstdint>
+
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
@@ -28,19 +31,22 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
}
-uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
- const uint64_t salt[5]) {
+uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[5]) {
// Prefetch the cacheline that data resides in.
PrefetchToLocalCache(data);
const uint8_t* ptr = static_cast<const uint8_t*>(data);
uint64_t starting_length = static_cast<uint64_t>(len);
+ const uint8_t* last_16_ptr = ptr + starting_length - 16;
uint64_t current_state = seed ^ salt[0];
if (len > 64) {
// If we have more than 64 bytes, we're going to handle chunks of 64
// bytes at a time. We're going to build up two separate hash states
// which we will then hash together.
- uint64_t duplicated_state = current_state;
+ uint64_t duplicated_state0 = current_state;
+ uint64_t duplicated_state1 = current_state;
+ uint64_t duplicated_state2 = current_state;
do {
// Always prefetch the next cacheline.
@@ -55,40 +61,72 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
- uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
- uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
- current_state = (cs0 ^ cs1);
+ current_state = Mix(a ^ salt[1], b ^ current_state);
+ duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
- uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
- uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
- duplicated_state = (ds0 ^ ds1);
+ duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
+ duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
ptr += 64;
len -= 64;
} while (len > 64);
- current_state = current_state ^ duplicated_state;
+ current_state = (current_state ^ duplicated_state0) ^
+ (duplicated_state1 + duplicated_state2);
}
// We now have a data `ptr` with at most 64 bytes and the current state
// of the hashing state machine stored in current_state.
- while (len > 16) {
+ if (len > 32) {
uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+ uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+ uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
- current_state = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+ current_state = cs0 ^ cs1;
+
+ ptr += 32;
+ len -= 32;
+ }
- ptr += 16;
- len -= 16;
+ // We now have a data `ptr` with at most 32 bytes and the current state
+ // of the hashing state machine stored in current_state.
+ if (len > 16) {
+ uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+
+ current_state = Mix(a ^ salt[1], b ^ current_state);
}
- // We now have a data `ptr` with at most 16 bytes.
+ // We now have a data `ptr` with at least 1 and at most 16 bytes. But we can
+ // safely read from `ptr + len - 16`.
+ uint64_t a = absl::base_internal::UnalignedLoad64(last_16_ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(last_16_ptr + 8);
+
+ return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
+}
+
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[5]) {
+ if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
+
+ // Prefetch the cacheline that data resides in.
+ PrefetchToLocalCache(data);
+ const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ uint64_t starting_length = static_cast<uint64_t>(len);
+ uint64_t current_state = seed ^ salt[0];
+ if (len == 0) return current_state;
+
uint64_t a = 0;
uint64_t b = 0;
+
+ // We now have a data `ptr` with at least 1 and at most 16 bytes.
if (len > 8) {
// When we have at least 9 and at most 16 bytes, set A to the first 64
- // bits of the input and B to the last 64 bits of the input. Yes, they will
- // overlap in the middle if we are working with less than the full 16
+ // bits of the input and B to the last 64 bits of the input. Yes, they
+ // will overlap in the middle if we are working with less than the full 16
// bytes.
a = absl::base_internal::UnalignedLoad64(ptr);
b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
@@ -97,20 +135,14 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
// bits and B to the last 32 bits.
a = absl::base_internal::UnalignedLoad32(ptr);
b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
- } else if (len > 0) {
- // If we have at least 1 and at most 3 bytes, read all of the provided
- // bits into A, with some adjustments.
- a = static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
- ptr[len - 1]);
- b = 0;
} else {
- a = 0;
- b = 0;
+ // If we have at least 1 and at most 3 bytes, read 2 bytes into A and the
+ // other byte into B, with some adjustments.
+ a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
+ b = static_cast<uint64_t>(ptr[len >> 1]);
}
- uint64_t w = Mix(a ^ salt[1], b ^ current_state);
- uint64_t z = salt[1] ^ starting_length;
- return Mix(w, z);
+ return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
}
} // namespace hash_internal
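
The tail handling above depends on overlapping unaligned reads; the sketch below restates the idea with a hypothetical helper, substituting memcpy for abseil's internal unaligned-load routines:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Covers any 9..16-byte input with exactly two 8-byte loads: one starting
    // at the first byte and one ending at the last byte. The loads overlap in
    // the middle when len < 16, so every byte is read without a length branch.
    static void LoadTail16(const unsigned char* ptr, size_t len,
                           uint64_t* a, uint64_t* b) {
      // Precondition: 9 <= len <= 16.
      std::memcpy(a, ptr, 8);            // first 8 bytes
      std::memcpy(b, ptr + len - 8, 8);  // last 8 bytes, may overlap the first
    }
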
diff --git a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
index 439968aa98..d460e35198 100644
--- a/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
+++ b/contrib/restricted/abseil-cpp/absl/hash/internal/low_level_hash.h
@@ -43,6 +43,10 @@ namespace hash_internal {
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]);
+// Same as above except the length must be greater than 16.
+uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[5]);
+
} // namespace hash_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/log/absl_vlog_is_on.h b/contrib/restricted/abseil-cpp/absl/log/absl_vlog_is_on.h
index 29096b4857..6bf6c41339 100644
--- a/contrib/restricted/abseil-cpp/absl/log/absl_vlog_is_on.h
+++ b/contrib/restricted/abseil-cpp/absl/log/absl_vlog_is_on.h
@@ -46,12 +46,12 @@
// Files which do not match any pattern in `--vmodule` use the value of `--v` as
// their effective verbosity level. The default is 0.
//
-// SetVLOGLevel helper function is provided to do limited dynamic control over
+// The SetVLogLevel helper function provides limited dynamic control over
// V-logging by appending to `--vmodule`. Because these go at the beginning of
// the list, they take priority over any globs previously added.
//
// Resetting --vmodule will override all previous modifications to `--vmodule`,
-// including via SetVLOGLevel.
+// including via SetVLogLevel.
#ifndef ABSL_LOG_ABSL_VLOG_IS_ON_H_
#define ABSL_LOG_ABSL_VLOG_IS_ON_H_
@@ -77,7 +77,7 @@
// Each ABSL_VLOG_IS_ON call site gets its own VLogSite that registers with the
// global linked list of sites to asynchronously update its verbosity level on
// changes to --v or --vmodule. The verbosity can also be set by manually
-// calling SetVLOGLevel.
+// calling SetVLogLevel.
//
// ABSL_VLOG_IS_ON is not async signal safe, but it is guaranteed not to
// allocate new memory.
diff --git a/contrib/restricted/abseil-cpp/absl/log/die_if_null.h b/contrib/restricted/abseil-cpp/absl/log/die_if_null.h
index 127a9ac882..f773aa854e 100644
--- a/contrib/restricted/abseil-cpp/absl/log/die_if_null.h
+++ b/contrib/restricted/abseil-cpp/absl/log/die_if_null.h
@@ -55,7 +55,7 @@ namespace log_internal {
// `line` location. Called when `ABSL_DIE_IF_NULL` fails. Calling this function
// generates less code than its implementation would if inlined, for a slight
// code size reduction each time `ABSL_DIE_IF_NULL` is called.
-ABSL_ATTRIBUTE_NORETURN ABSL_ATTRIBUTE_NOINLINE void DieBecauseNull(
+[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void DieBecauseNull(
const char* file, int line, const char* exprtext);
// Helper for `ABSL_DIE_IF_NULL`.
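
For context, the macro this helper backs is used like the following (the value source is hypothetical):

    #include <memory>
    #include "absl/log/die_if_null.h"

    std::unique_ptr<int> MakeValue();

    void Use() {
      // Evaluates to its argument when non-null; otherwise DieBecauseNull
      // logs the file, line, and expression text, then terminates.
      std::unique_ptr<int> v = ABSL_DIE_IF_NULL(MakeValue());
    }
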
diff --git a/contrib/restricted/abseil-cpp/absl/log/globals.h b/contrib/restricted/abseil-cpp/absl/log/globals.h
index b36e47e6d1..4feec4078f 100644
--- a/contrib/restricted/abseil-cpp/absl/log/globals.h
+++ b/contrib/restricted/abseil-cpp/absl/log/globals.h
@@ -140,7 +140,7 @@ void ClearLogBacktraceLocation();
//
// This option tells the logging library that every logged message
// should include the prefix (severity, date, time, PID, etc.)
-
+//
// ShouldPrependLogPrefix()
//
// Returns the value of the Prepend Log Prefix option.
@@ -154,25 +154,38 @@ ABSL_MUST_USE_RESULT bool ShouldPrependLogPrefix();
void EnableLogPrefix(bool on_off);
//------------------------------------------------------------------------------
-// Set Global VLOG Level
+// `VLOG` Configuration
//------------------------------------------------------------------------------
//
-// Sets the global `(ABSL_)VLOG(_IS_ON)` level to `log_level`. This level is
-// applied to any sites whose filename doesn't match any `module_pattern`.
-// Returns the prior value.
-inline int SetGlobalVLogLevel(int log_level) {
- return absl::log_internal::UpdateGlobalVLogLevel(log_level);
+// These methods set the `(ABSL_)VLOG(_IS_ON)` threshold. They allow
+// programmatic control of the thresholds set by the --v and --vmodule flags.
+//
+// Only `VLOG`s with a severity level LESS THAN OR EQUAL TO the threshold will
+// be evaluated.
+//
+// For example, if the threshold is 2, then:
+//
+// VLOG(2) << "This message will be logged.";
+// VLOG(3) << "This message will NOT be logged.";
+//
+// The default threshold is 0. Since `VLOG` levels must not be negative, a
+// negative threshold value will turn off all VLOGs.
+
+// SetGlobalVLogLevel()
+//
+// Sets the global `VLOG` level to threshold. Returns the previous global
+// threshold.
+inline int SetGlobalVLogLevel(int threshold) {
+ return absl::log_internal::UpdateGlobalVLogLevel(threshold);
}
-//------------------------------------------------------------------------------
-// Set VLOG Level
-//------------------------------------------------------------------------------
+// SetVLogLevel()
//
-// Sets `(ABSL_)VLOG(_IS_ON)` level for `module_pattern` to `log_level`. This
-// allows programmatic control of what is normally set by the --vmodule flag.
-// Returns the level that previously applied to `module_pattern`.
-inline int SetVLogLevel(absl::string_view module_pattern, int log_level) {
- return absl::log_internal::PrependVModule(module_pattern, log_level);
+// Sets the `VLOG` threshold for all files that match `module_pattern`,
+// overwriting any prior value. Files that don't match aren't affected.
+// Returns the threshold that previously applied to `module_pattern`.
+inline int SetVLogLevel(absl::string_view module_pattern, int threshold) {
+ return absl::log_internal::PrependVModule(module_pattern, threshold);
}
//------------------------------------------------------------------------------
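
A short configuration sketch consistent with the thresholds documented above (the module pattern and call site are hypothetical):

    #include "absl/log/globals.h"
    #include "absl/log/log.h"

    void ConfigureVerbosity() {
      absl::SetGlobalVLogLevel(2);      // VLOG(n) now fires for n <= 2
      absl::SetVLogLevel("hash_*", 3);  // files matching hash_* log up to 3
      VLOG(2) << "logged: within the global threshold";
      VLOG(3) << "suppressed in this file; enabled in files matching hash_*";
    }
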
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc
index f4b67647a6..23c4a3b205 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.cc
@@ -16,6 +16,10 @@
#include <string.h>
+#include <ostream>
+
+#include "absl/strings/string_view.h"
+
#ifdef _MSC_VER
#define strcasecmp _stricmp
#else
@@ -113,6 +117,22 @@ DEFINE_CHECK_STROP_IMPL(CHECK_STRCASEEQ, strcasecmp, true)
DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false)
#undef DEFINE_CHECK_STROP_IMPL
+namespace detect_specialization {
+
+StringifySink::StringifySink(std::ostream& os) : os_(os) {}
+
+void StringifySink::Append(absl::string_view text) { os_ << text; }
+
+void StringifySink::Append(size_t length, char ch) {
+ for (size_t i = 0; i < length; ++i) os_.put(ch);
+}
+
+void AbslFormatFlush(StringifySink* sink, absl::string_view text) {
+ sink->Append(text);
+}
+
+} // namespace detect_specialization
+
} // namespace log_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h
index 11f0f4078b..215922079c 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/check_op.h
@@ -24,9 +24,11 @@
#include <stdint.h>
+#include <cstddef>
#include <ostream>
#include <sstream>
#include <string>
+#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
@@ -35,6 +37,8 @@
#include "absl/log/internal/nullguard.h"
#include "absl/log/internal/nullstream.h"
#include "absl/log/internal/strip.h"
+#include "absl/strings/has_absl_stringify.h"
+#include "absl/strings/string_view.h"
// `ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL` wraps string literals that
// should be stripped when `ABSL_MIN_LOG_LEVEL` exceeds `kFatal`.
@@ -58,13 +62,13 @@
#endif
#define ABSL_LOG_INTERNAL_CHECK_OP(name, op, val1, val1_text, val2, val2_text) \
- while ( \
- ::std::string* absl_log_internal_check_op_result ABSL_ATTRIBUTE_UNUSED = \
- ::absl::log_internal::name##Impl( \
- ::absl::log_internal::GetReferenceableValue(val1), \
- ::absl::log_internal::GetReferenceableValue(val2), \
- ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val1_text \
- " " #op " " val2_text))) \
+ while (::std::string* absl_log_internal_check_op_result \
+ ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG = \
+ ::absl::log_internal::name##Impl( \
+ ::absl::log_internal::GetReferenceableValue(val1), \
+ ::absl::log_internal::GetReferenceableValue(val2), \
+ ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL( \
+ val1_text " " #op " " val2_text))) \
ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true) \
ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_op_result).InternalStream()
#define ABSL_LOG_INTERNAL_QCHECK_OP(name, op, val1, val1_text, val2, \
@@ -287,6 +291,44 @@ decltype(detect_specialization::operator<<(std::declval<std::ostream&>(),
std::declval<const T&>()))
Detect(char);
+// A sink for AbslStringify which redirects everything to a std::ostream.
+class StringifySink {
+ public:
+ explicit StringifySink(std::ostream& os ABSL_ATTRIBUTE_LIFETIME_BOUND);
+
+ void Append(absl::string_view text);
+ void Append(size_t length, char ch);
+ friend void AbslFormatFlush(StringifySink* sink, absl::string_view text);
+
+ private:
+ std::ostream& os_;
+};
+
+// Wraps a type implementing AbslStringify, and implements operator<<.
+template <typename T>
+class StringifyToStreamWrapper {
+ public:
+ explicit StringifyToStreamWrapper(const T& v ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : v_(v) {}
+
+ friend std::ostream& operator<<(std::ostream& os,
+ const StringifyToStreamWrapper& wrapper) {
+ StringifySink sink(os);
+ AbslStringify(sink, wrapper.v_);
+ return os;
+ }
+
+ private:
+ const T& v_;
+};
+
+// This overload triggers when T implements AbslStringify.
+// StringifyToStreamWrapper is used to allow MakeCheckOpString to use
+// operator<<.
+template <typename T>
+std::enable_if_t<HasAbslStringify<T>::value,
+ StringifyToStreamWrapper<T>>
+Detect(...); // Ellipsis has lowest preference when int passed.
} // namespace detect_specialization
template <typename T>
@@ -342,20 +384,20 @@ ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*);
// `(int, int)` override works around the issue that the compiler will not
// instantiate the template version of the function on values of unnamed enum
// type.
-#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op) \
- template <typename T1, typename T2> \
- inline constexpr ::std::string* name##Impl(const T1& v1, const T2& v2, \
- const char* exprtext) { \
- using U1 = CheckOpStreamType<T1>; \
- using U2 = CheckOpStreamType<T2>; \
- return ABSL_PREDICT_TRUE(v1 op v2) \
- ? nullptr \
- : ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, v1, v2, \
- exprtext); \
- } \
- inline constexpr ::std::string* name##Impl(int v1, int v2, \
- const char* exprtext) { \
- return name##Impl<int, int>(v1, v2, exprtext); \
+#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op) \
+ template <typename T1, typename T2> \
+ inline constexpr ::std::string* name##Impl(const T1& v1, const T2& v2, \
+ const char* exprtext) { \
+ using U1 = CheckOpStreamType<T1>; \
+ using U2 = CheckOpStreamType<T2>; \
+ return ABSL_PREDICT_TRUE(v1 op v2) \
+ ? nullptr \
+ : ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, U1(v1), \
+ U2(v2), exprtext); \
+ } \
+ inline constexpr ::std::string* name##Impl(int v1, int v2, \
+ const char* exprtext) { \
+ return name##Impl<int, int>(v1, v2, exprtext); \
}
ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_EQ, ==)
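
A hedged illustration of what the new `Detect` overload enables: a type implementing `AbslStringify` (the `Point` type below is hypothetical) can now appear in `CHECK` comparisons without defining `operator<<`:

    #include "absl/log/check.h"
    #include "absl/strings/str_format.h"

    struct Point {
      int x = 0, y = 0;
      template <typename Sink>
      friend void AbslStringify(Sink& sink, const Point& p) {
        absl::Format(&sink, "(%d, %d)", p.x, p.y);
      }
      friend bool operator==(const Point& a, const Point& b) {
        return a.x == b.x && a.y == b.y;
      }
    };

    void Demo(const Point& a, const Point& b) {
      // On failure, both operands are stringified via AbslStringify and
      // routed through StringifyToStreamWrapper into the failure message.
      CHECK_EQ(a, b) << "points diverged";
    }
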
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h b/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h
index 645f3c23ea..9dc15db428 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/conditions.h
@@ -230,8 +230,8 @@ class LogEveryNSecState final {
// Helper routines to abort the application quietly
-ABSL_ATTRIBUTE_NORETURN inline void AbortQuietly() { abort(); }
-ABSL_ATTRIBUTE_NORETURN inline void ExitQuietly() { _exit(1); }
+[[noreturn]] inline void AbortQuietly() { abort(); }
+[[noreturn]] inline void ExitQuietly() { _exit(1); }
} // namespace log_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/log_impl.h b/contrib/restricted/abseil-cpp/absl/log/internal/log_impl.h
index 99de6dbb24..a67f2f31b4 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/log_impl.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/log_impl.h
@@ -35,14 +35,14 @@
#ifndef NDEBUG
#define ABSL_LOG_INTERNAL_DLOG_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#else
#define ABSL_LOG_INTERNAL_DLOG_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#endif
-// The `switch` ensures that this expansion is the begnning of a statement (as
+// The `switch` ensures that this expansion is the beginning of a statement (as
// opposed to an expression). The use of both `case 0` and `default` is to
// suppress a compiler warning.
#define ABSL_LOG_INTERNAL_VLOG_IMPL(verbose_level) \
@@ -58,7 +58,7 @@
switch (const int absl_logging_internal_verbose_level = (verbose_level)) \
case 0: \
default: \
- ABSL_LOG_INTERNAL_LOG_IF_IMPL( \
+ ABSL_LOG_INTERNAL_DLOG_IF_IMPL( \
_INFO, ABSL_VLOG_IS_ON(absl_logging_internal_verbose_level)) \
.WithVerbosity(absl_logging_internal_verbose_level)
#else
@@ -66,7 +66,7 @@
switch (const int absl_logging_internal_verbose_level = (verbose_level)) \
case 0: \
default: \
- ABSL_LOG_INTERNAL_LOG_IF_IMPL( \
+ ABSL_LOG_INTERNAL_DLOG_IF_IMPL( \
_INFO, false && ABSL_VLOG_IS_ON(absl_logging_internal_verbose_level)) \
.WithVerbosity(absl_logging_internal_verbose_level)
#endif
@@ -82,11 +82,11 @@
#ifndef NDEBUG
#define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#else
#define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false && (condition)) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#endif
// ABSL_LOG_EVERY_N
@@ -132,36 +132,36 @@
#ifndef NDEBUG
#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
- (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
- (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
- (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
- (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#else // def NDEBUG
#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
- (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
- (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
- (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
- (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#endif // def NDEBUG
#define ABSL_LOG_INTERNAL_VLOG_EVERY_N_IMPL(verbose_level, n) \
@@ -243,40 +243,40 @@
#ifndef NDEBUG
#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
n_seconds) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#else // def NDEBUG
#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
- EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
- FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
- EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
EveryNSec, n_seconds) \
- ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+ ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
#endif // def NDEBUG
#endif // ABSL_LOG_INTERNAL_LOG_IMPL_H_
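
The reason the `DLOG` expansions now use dedicated `ABSL_LOGGING_INTERNAL_DLOG*` names shows up in code like this sketch (the function is hypothetical):

    #include "absl/log/log.h"

    int FirstByte(const char* s) {
      if (s == nullptr) {
        // Logs and terminates in debug builds; compiles to a no-op in release
        // builds. The DLOG expansion is deliberately not [[noreturn]], so the
        // compiler still requires the code below in every build mode.
        DLOG(FATAL) << "null input";
      }
      return s ? s[0] : -1;
    }
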
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc
index 10ac245373..4e9b08af64 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.cc
@@ -27,6 +27,7 @@
#include <algorithm>
#include <array>
#include <atomic>
+#include <ios>
#include <memory>
#include <ostream>
#include <string>
@@ -67,7 +68,14 @@ namespace log_internal {
namespace {
// message `logging.proto.Event`
enum EventTag : uint8_t {
+ kFileName = 2,
+ kFileLine = 3,
+ kTimeNsecs = 4,
+ kSeverity = 5,
+ kThreadId = 6,
kValue = 7,
+ kSequenceNumber = 9,
+ kThreadName = 10,
};
// message `logging.proto.Value`
@@ -100,6 +108,23 @@ bool PrintValue(absl::Span<char>& dst, absl::Span<const char> buf) {
return true;
}
+// See `logging.proto.Severity`
+int32_t ProtoSeverity(absl::LogSeverity severity, int verbose_level) {
+ switch (severity) {
+ case absl::LogSeverity::kInfo:
+ if (verbose_level == absl::LogEntry::kNoVerbosityLevel) return 800;
+ return 600 - verbose_level;
+ case absl::LogSeverity::kWarning:
+ return 900;
+ case absl::LogSeverity::kError:
+ return 950;
+ case absl::LogSeverity::kFatal:
+ return 1100;
+ default:
+ return 800;
+ }
+}
+
absl::string_view Basename(absl::string_view filepath) {
#ifdef _WIN32
size_t path = filepath.find_last_of("/\\");
@@ -145,26 +170,37 @@ struct LogMessage::LogMessageData final {
// A `logging.proto.Event` proto message is built into `encoded_buf`.
std::array<char, kLogMessageBufferSize> encoded_buf;
- // `encoded_remaining` is the suffix of `encoded_buf` that has not been filled
- // yet. If a datum to be encoded does not fit into `encoded_remaining` and
- // cannot be truncated to fit, the size of `encoded_remaining` will be zeroed
- // to prevent encoding of any further data. Note that in this case its data()
- // pointer will not point past the end of `encoded_buf`.
- absl::Span<char> encoded_remaining;
+ // `encoded_remaining()` is the suffix of `encoded_buf` that has not been
+ // filled yet. If a datum to be encoded does not fit into
+ // `encoded_remaining()` and cannot be truncated to fit, the size of
+ // `encoded_remaining()` will be zeroed to prevent encoding of any further
+ // data. Note that in this case its `data()` pointer will not point past the
+ // end of `encoded_buf`.
+ // The first use of `encoded_remaining()` is our chance to record metadata
+ // after any modifications (e.g. by `AtLocation()`) but before any data have
+ // been recorded. We want to record metadata before data so that data are
+ // preferentially truncated if we run out of buffer.
+ absl::Span<char>& encoded_remaining() {
+ if (encoded_remaining_actual_do_not_use_directly.data() == nullptr) {
+ encoded_remaining_actual_do_not_use_directly =
+ absl::MakeSpan(encoded_buf);
+ InitializeEncodingAndFormat();
+ }
+ return encoded_remaining_actual_do_not_use_directly;
+ }
+ absl::Span<char> encoded_remaining_actual_do_not_use_directly;
// A formatted string message is built in `string_buf`.
std::array<char, kLogMessageBufferSize> string_buf;
+ void InitializeEncodingAndFormat();
void FinalizeEncodingAndFormat();
};
LogMessage::LogMessageData::LogMessageData(const char* file, int line,
absl::LogSeverity severity,
absl::Time timestamp)
- : extra_sinks_only(false),
- manipulated(nullptr),
- // This `absl::MakeSpan` silences spurious -Wuninitialized from GCC:
- encoded_remaining(absl::MakeSpan(encoded_buf)) {
+ : extra_sinks_only(false), manipulated(nullptr) {
// Legacy defaults for LOG's ostream:
manipulated.setf(std::ios_base::showbase | std::ios_base::boolalpha);
entry.full_filename_ = file;
@@ -177,13 +213,25 @@ LogMessage::LogMessageData::LogMessageData(const char* file, int line,
entry.tid_ = absl::base_internal::GetCachedTID();
}
+void LogMessage::LogMessageData::InitializeEncodingAndFormat() {
+ EncodeStringTruncate(EventTag::kFileName, entry.source_filename(),
+ &encoded_remaining());
+ EncodeVarint(EventTag::kFileLine, entry.source_line(), &encoded_remaining());
+ EncodeVarint(EventTag::kTimeNsecs, absl::ToUnixNanos(entry.timestamp()),
+ &encoded_remaining());
+ EncodeVarint(EventTag::kSeverity,
+ ProtoSeverity(entry.log_severity(), entry.verbosity()),
+ &encoded_remaining());
+ EncodeVarint(EventTag::kThreadId, entry.tid(), &encoded_remaining());
+}
+
void LogMessage::LogMessageData::FinalizeEncodingAndFormat() {
- // Note that `encoded_remaining` may have zero size without pointing past the
- // end of `encoded_buf`, so the difference between `data()` pointers is used
- // to compute the size of `encoded_data`.
+ // Note that `encoded_remaining()` may have zero size without pointing past
+ // the end of `encoded_buf`, so the difference between `data()` pointers is
+ // used to compute the size of `encoded_data`.
absl::Span<const char> encoded_data(
encoded_buf.data(),
- static_cast<size_t>(encoded_remaining.data() - encoded_buf.data()));
+ static_cast<size_t>(encoded_remaining().data() - encoded_buf.data()));
// `string_remaining` is the suffix of `string_buf` that has not been filled
// yet.
absl::Span<char> string_remaining(string_buf);
@@ -211,7 +259,6 @@ void LogMessage::LogMessageData::FinalizeEncodingAndFormat() {
if (PrintValue(string_remaining, field.bytes_value())) continue;
break;
}
- break;
}
auto chars_written =
static_cast<size_t>(string_remaining.data() - string_buf.data());
@@ -413,7 +460,7 @@ void LogMessage::Flush() {
data_->FinalizeEncodingAndFormat();
data_->entry.encoding_ =
absl::string_view(data_->encoded_buf.data(),
- static_cast<size_t>(data_->encoded_remaining.data() -
+ static_cast<size_t>(data_->encoded_remaining().data() -
data_->encoded_buf.data()));
SendToLog();
}
@@ -421,7 +468,7 @@ void LogMessage::Flush() {
void LogMessage::SetFailQuietly() { data_->fail_quietly = true; }
LogMessage::OstreamView::OstreamView(LogMessageData& message_data)
- : data_(message_data), encoded_remaining_copy_(data_.encoded_remaining) {
+ : data_(message_data), encoded_remaining_copy_(data_.encoded_remaining()) {
// This constructor sets the `streambuf` up so that streaming into an attached
// ostream encodes string data in-place. To do that, we write appropriate
// headers into the buffer using a copy of the buffer view so that we can
@@ -444,8 +491,8 @@ LogMessage::OstreamView::~OstreamView() {
if (!string_start_.data()) {
// The second field header didn't fit. Whether the first one did or not, we
// shouldn't commit `encoded_remaining_copy_`, and we also need to zero the
- // size of `data_->encoded_remaining` so that no more data are encoded.
- data_.encoded_remaining.remove_suffix(data_.encoded_remaining.size());
+ // size of `data_->encoded_remaining()` so that no more data are encoded.
+ data_.encoded_remaining().remove_suffix(data_.encoded_remaining().size());
return;
}
const absl::Span<const char> contents(pbase(),
@@ -454,7 +501,7 @@ LogMessage::OstreamView::~OstreamView() {
encoded_remaining_copy_.remove_prefix(contents.size());
EncodeMessageLength(string_start_, &encoded_remaining_copy_);
EncodeMessageLength(message_start_, &encoded_remaining_copy_);
- data_.encoded_remaining = encoded_remaining_copy_;
+ data_.encoded_remaining() = encoded_remaining_copy_;
}
std::ostream& LogMessage::OstreamView::stream() { return data_.manipulated; }
@@ -521,13 +568,13 @@ void LogMessage::LogBacktraceIfNeeded() {
view.stream() << ") ";
}
-// Encodes into `data_->encoded_remaining` a partial `logging.proto.Event`
+// Encodes into `data_->encoded_remaining()` a partial `logging.proto.Event`
// containing the specified string data using a `Value` field appropriate to
// `str_type`. Truncates `str` if necessary, but emits nothing and marks the
// buffer full if even the field headers do not fit.
template <LogMessage::StringType str_type>
void LogMessage::CopyToEncodedBuffer(absl::string_view str) {
- auto encoded_remaining_copy = data_->encoded_remaining;
+ auto encoded_remaining_copy = data_->encoded_remaining();
auto start = EncodeMessageStart(
EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + str.size(),
&encoded_remaining_copy);
@@ -540,11 +587,11 @@ void LogMessage::CopyToEncodedBuffer(absl::string_view str) {
str, &encoded_remaining_copy)) {
// The string may have been truncated, but the field header fit.
EncodeMessageLength(start, &encoded_remaining_copy);
- data_->encoded_remaining = encoded_remaining_copy;
+ data_->encoded_remaining() = encoded_remaining_copy;
} else {
- // The field header(s) did not fit; zero `encoded_remaining` so we don't
+ // The field header(s) did not fit; zero `encoded_remaining()` so we don't
// write anything else later.
- data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
+ data_->encoded_remaining().remove_suffix(data_->encoded_remaining().size());
}
}
template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
@@ -553,7 +600,7 @@ template void LogMessage::CopyToEncodedBuffer<
LogMessage::StringType::kNotLiteral>(absl::string_view str);
template <LogMessage::StringType str_type>
void LogMessage::CopyToEncodedBuffer(char ch, size_t num) {
- auto encoded_remaining_copy = data_->encoded_remaining;
+ auto encoded_remaining_copy = data_->encoded_remaining();
auto value_start = EncodeMessageStart(
EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + num,
&encoded_remaining_copy);
@@ -566,11 +613,11 @@ void LogMessage::CopyToEncodedBuffer(char ch, size_t num) {
log_internal::AppendTruncated(ch, num, encoded_remaining_copy);
EncodeMessageLength(str_start, &encoded_remaining_copy);
EncodeMessageLength(value_start, &encoded_remaining_copy);
- data_->encoded_remaining = encoded_remaining_copy;
+ data_->encoded_remaining() = encoded_remaining_copy;
} else {
- // The field header(s) did not fit; zero `encoded_remaining` so we don't
+ // The field header(s) did not fit; zero `encoded_remaining()` so we don't
// write anything else later.
- data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
+ data_->encoded_remaining().remove_suffix(data_->encoded_remaining().size());
}
}
template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
@@ -578,6 +625,13 @@ template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
template void LogMessage::CopyToEncodedBuffer<
LogMessage::StringType::kNotLiteral>(char ch, size_t num);
+// We intentionally don't return from these destructors, so disable MSVC's
+// warning about destructors that never return.
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
+
LogMessageFatal::LogMessageFatal(const char* file, int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {}
@@ -587,19 +641,29 @@ LogMessageFatal::LogMessageFatal(const char* file, int line,
*this << "Check failed: " << failure_msg << " ";
}
-// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
-// disable msvc's warning about the d'tor never returning.
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(push)
-#pragma warning(disable : 4722)
-#endif
LogMessageFatal::~LogMessageFatal() {
Flush();
FailWithoutStackTrace();
}
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(pop)
-#endif
+
+LogMessageDebugFatal::LogMessageDebugFatal(const char* file, int line)
+ : LogMessage(file, line, absl::LogSeverity::kFatal) {}
+
+LogMessageDebugFatal::~LogMessageDebugFatal() {
+ Flush();
+ FailWithoutStackTrace();
+}
+
+LogMessageQuietlyDebugFatal::LogMessageQuietlyDebugFatal(const char* file,
+ int line)
+ : LogMessage(file, line, absl::LogSeverity::kFatal) {
+ SetFailQuietly();
+}
+
+LogMessageQuietlyDebugFatal::~LogMessageQuietlyDebugFatal() {
+ Flush();
+ FailQuietly();
+}
LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {
@@ -608,17 +672,10 @@ LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line)
LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line,
absl::string_view failure_msg)
- : LogMessage(file, line, absl::LogSeverity::kFatal) {
- SetFailQuietly();
- *this << "Check failed: " << failure_msg << " ";
+ : LogMessageQuietlyFatal(file, line) {
+ *this << "Check failed: " << failure_msg << " ";
}
-// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
-// disable msvc's warning about the d'tor never returning.
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(push)
-#pragma warning(disable : 4722)
-#endif
LogMessageQuietlyFatal::~LogMessageQuietlyFatal() {
Flush();
FailQuietly();
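
The lazily initialized `encoded_remaining()` above follows a general pattern; here is a simplified sketch (the class, size, and header routine are illustrative, not the real `LogMessageData`):

    #include <array>
    #include "absl/types/span.h"

    class EncodedBuffer {
     public:
      // First access installs the span and records header metadata, so headers
      // always precede data and data are truncated first when space runs out.
      absl::Span<char>& remaining() {
        if (remaining_.data() == nullptr) {  // default span: not yet initialized
          remaining_ = absl::MakeSpan(buf_);
          WriteMetadataHeader();  // hypothetical: encodes fields, trims remaining_
        }
        return remaining_;
      }

     private:
      void WriteMetadataHeader();
      std::array<char, 512> buf_;
      absl::Span<char> remaining_;  // data() == nullptr until first use
    };
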
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h
index 4ecb8a1498..0c067da9ad 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/log_message.h
@@ -187,11 +187,11 @@ class LogMessage {
protected:
// Call `abort()` or similar to perform `LOG(FATAL)` crash. It is assumed
// that the caller has already generated and written the trace as appropriate.
- ABSL_ATTRIBUTE_NORETURN static void FailWithoutStackTrace();
+ [[noreturn]] static void FailWithoutStackTrace();
// Similar to `FailWithoutStackTrace()`, but without `abort()`. Terminates
// the process with an error exit code.
- ABSL_ATTRIBUTE_NORETURN static void FailQuietly();
+ [[noreturn]] static void FailQuietly();
// Dispatches the completed `absl::LogEntry` to applicable `absl::LogSink`s.
// This might as well be inlined into `~LogMessage` except that
@@ -354,15 +354,34 @@ class LogMessageFatal final : public LogMessage {
LogMessageFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
LogMessageFatal(const char* file, int line,
absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD;
- ABSL_ATTRIBUTE_NORETURN ~LogMessageFatal();
+ [[noreturn]] ~LogMessageFatal();
};
+// `LogMessageDebugFatal` ensures the process will exit with a failure status
+// after logging this message. It matches LogMessageFatal except that it is not
+// [[noreturn]], since it is used for DLOG(FATAL) variants.
+class LogMessageDebugFatal final : public LogMessage {
+ public:
+ LogMessageDebugFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
+ ~LogMessageDebugFatal();
+};
+
+class LogMessageQuietlyDebugFatal final : public LogMessage {
+ public:
+  // DLOG(QFATAL) calls this instead of LogMessageQuietlyFatal so that the
+  // destructor is not [[noreturn]]: although the severity is always FATAL,
+  // this type is only used when DLOG() is enabled.
+ LogMessageQuietlyDebugFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
+ ~LogMessageQuietlyDebugFatal();
+};
+
+// Used for LOG(QFATAL) to make sure it's properly understood as [[noreturn]].
class LogMessageQuietlyFatal final : public LogMessage {
public:
LogMessageQuietlyFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
LogMessageQuietlyFatal(const char* file, int line,
absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD;
- ABSL_ATTRIBUTE_NORETURN ~LogMessageQuietlyFatal();
+ [[noreturn]] ~LogMessageQuietlyFatal();
};
} // namespace log_internal
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h b/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h
index 9266852eb9..973e91ab68 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/nullstream.h
@@ -117,16 +117,7 @@ class NullStreamMaybeFatal final : public NullStream {
class NullStreamFatal final : public NullStream {
public:
NullStreamFatal() = default;
- // ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
- // disable msvc's warning about the d'tor never returning.
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(push)
-#pragma warning(disable : 4722)
-#endif
- ABSL_ATTRIBUTE_NORETURN ~NullStreamFatal() { _exit(1); }
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(pop)
-#endif
+ [[noreturn]] ~NullStreamFatal() { _exit(1); }
};
} // namespace log_internal
diff --git a/contrib/restricted/abseil-cpp/absl/log/internal/strip.h b/contrib/restricted/abseil-cpp/absl/log/internal/strip.h
index f8d2786939..3e5501040c 100644
--- a/contrib/restricted/abseil-cpp/absl/log/internal/strip.h
+++ b/contrib/restricted/abseil-cpp/absl/log/internal/strip.h
@@ -20,6 +20,7 @@
#ifndef ABSL_LOG_INTERNAL_STRIP_H_
#define ABSL_LOG_INTERNAL_STRIP_H_
+#include "absl/base/attributes.h" // IWYU pragma: keep
#include "absl/base/log_severity.h"
#include "absl/log/internal/log_message.h"
#include "absl/log/internal/nullstream.h"
@@ -29,6 +30,16 @@
// of defines comes in three flavors: vanilla, plus two variants that strip some
// logging in subtly different ways for subtly different reasons (see below).
#if defined(STRIP_LOG) && STRIP_LOG
+
+// Attribute for marking variables used in implementation details of logging
+// macros as unused, but only when `STRIP_LOG` is defined.
+// With `STRIP_LOG` on, not marking them triggers `-Wunused-but-set-variable`;
+// with `STRIP_LOG` off, marking them triggers `-Wused-but-marked-unused`.
+//
+// TODO(b/290784225): Replace this macro with attribute [[maybe_unused]] when
+// Abseil stops supporting C++14.
+#define ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG ABSL_ATTRIBUTE_UNUSED
+
#define ABSL_LOGGING_INTERNAL_LOG_INFO ::absl::log_internal::NullStream()
#define ABSL_LOGGING_INTERNAL_LOG_WARNING ::absl::log_internal::NullStream()
#define ABSL_LOGGING_INTERNAL_LOG_ERROR ::absl::log_internal::NullStream()
@@ -38,10 +49,21 @@
::absl::log_internal::NullStreamMaybeFatal(::absl::kLogDebugFatal)
#define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
::absl::log_internal::NullStreamMaybeFatal(absl_log_internal_severity)
+
+// Fatal `DLOG`s expand a little differently to avoid being `[[noreturn]]`.
+#define ABSL_LOGGING_INTERNAL_DLOG_FATAL \
+ ::absl::log_internal::NullStreamMaybeFatal(::absl::LogSeverity::kFatal)
+#define ABSL_LOGGING_INTERNAL_DLOG_QFATAL \
+ ::absl::log_internal::NullStreamMaybeFatal(::absl::LogSeverity::kFatal)
+
#define ABSL_LOG_INTERNAL_CHECK(failure_message) ABSL_LOGGING_INTERNAL_LOG_FATAL
#define ABSL_LOG_INTERNAL_QCHECK(failure_message) \
ABSL_LOGGING_INTERNAL_LOG_QFATAL
+
#else // !defined(STRIP_LOG) || !STRIP_LOG
+
+#define ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG
+
#define ABSL_LOGGING_INTERNAL_LOG_INFO \
::absl::log_internal::LogMessage( \
__FILE__, __LINE__, ::absl::log_internal::LogMessage::InfoTag{})
@@ -60,6 +82,13 @@
#define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
::absl::log_internal::LogMessage(__FILE__, __LINE__, \
absl_log_internal_severity)
+
+// Fatal `DLOG`s expand a little differently to avoid being `[[noreturn]]`.
+#define ABSL_LOGGING_INTERNAL_DLOG_FATAL \
+ ::absl::log_internal::LogMessageDebugFatal(__FILE__, __LINE__)
+#define ABSL_LOGGING_INTERNAL_DLOG_QFATAL \
+ ::absl::log_internal::LogMessageQuietlyDebugFatal(__FILE__, __LINE__)
+
// These special cases dispatch to special-case constructors that allow us to
// avoid an extra function call and shrink non-LTO binaries by a percent or so.
#define ABSL_LOG_INTERNAL_CHECK(failure_message) \
@@ -69,4 +98,11 @@
failure_message)
#endif // !defined(STRIP_LOG) || !STRIP_LOG
+// These non-fatal `DLOG` macros expand the same way as `LOG`.
+#define ABSL_LOGGING_INTERNAL_DLOG_INFO ABSL_LOGGING_INTERNAL_LOG_INFO
+#define ABSL_LOGGING_INTERNAL_DLOG_WARNING ABSL_LOGGING_INTERNAL_LOG_WARNING
+#define ABSL_LOGGING_INTERNAL_DLOG_ERROR ABSL_LOGGING_INTERNAL_LOG_ERROR
+#define ABSL_LOGGING_INTERNAL_DLOG_DFATAL ABSL_LOGGING_INTERNAL_LOG_DFATAL
+#define ABSL_LOGGING_INTERNAL_DLOG_LEVEL ABSL_LOGGING_INTERNAL_LOG_LEVEL
+
#endif // ABSL_LOG_INTERNAL_STRIP_H_
diff --git a/contrib/restricted/abseil-cpp/absl/log/log.h b/contrib/restricted/abseil-cpp/absl/log/log.h
index b721b087cc..a4e1d1fe81 100644
--- a/contrib/restricted/abseil-cpp/absl/log/log.h
+++ b/contrib/restricted/abseil-cpp/absl/log/log.h
@@ -198,7 +198,6 @@
#define ABSL_LOG_LOG_H_
#include "absl/log/internal/log_impl.h"
-#include "absl/log/vlog_is_on.h" // IWYU pragma: export
// LOG()
//
@@ -234,6 +233,11 @@
//
// See vlog_is_on.h for further documentation, including the usage of the
// --vmodule flag to log at different levels in different source files.
+//
+// `VLOG` does not produce any output when verbose logging is not enabled.
+// However, simply testing whether verbose logging is enabled can be expensive.
+// If you don't intend to enable verbose logging in non-debug builds, consider
+// using `DVLOG` instead.
#define VLOG(severity) ABSL_LOG_INTERNAL_VLOG_IMPL(severity)
// `DVLOG` behaves like `VLOG` in debug mode (i.e. `#ifndef NDEBUG`).
diff --git a/contrib/restricted/abseil-cpp/absl/log/log_sink.h b/contrib/restricted/abseil-cpp/absl/log/log_sink.h
index 9bfa6f8624..2910070d51 100644
--- a/contrib/restricted/abseil-cpp/absl/log/log_sink.h
+++ b/contrib/restricted/abseil-cpp/absl/log/log_sink.h
@@ -32,15 +32,16 @@ ABSL_NAMESPACE_BEGIN
// `absl::LogSink` is an interface which can be extended to intercept and
// process particular messages (with `LOG.ToSinkOnly()` or
// `LOG.ToSinkAlso()`) or all messages (if registered with
-// `absl::AddLogSink`). Implementations must be thread-safe, and should take
-// care not to take any locks that might be held by the `LOG` caller.
+// `absl::AddLogSink`). Implementations must not take any locks that might be
+// held by the `LOG` caller.
class LogSink {
public:
virtual ~LogSink() = default;
// LogSink::Send()
//
- // `Send` is called synchronously during the log statement.
+ // `Send` is called synchronously during the log statement. `Send` must be
+ // thread-safe.
//
// It is safe to use `LOG` within an implementation of `Send`. `ToSinkOnly`
// and `ToSinkAlso` are safe in general but can be used to create an infinite
@@ -50,9 +51,15 @@ class LogSink {
// LogSink::Flush()
//
// Sinks that buffer messages should override this method to flush the buffer
- // and return.
+ // and return. `Flush` must be thread-safe.
virtual void Flush() {}
+ protected:
+ LogSink() = default;
+ // Implementations may be copyable and/or movable.
+ LogSink(const LogSink&) = default;
+ LogSink& operator=(const LogSink&) = default;
+
private:
// https://lld.llvm.org/missingkeyfunction.html#missing-key-function
virtual void KeyFunction() const final; // NOLINT(readability/inheritance)
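
A minimal sink consistent with the tightened contract above (the class name and destination are hypothetical; `Send` and `Flush` stay trivially thread-safe by writing one formatted line per call):

    #include <iostream>
    #include "absl/log/log_entry.h"
    #include "absl/log/log_sink.h"
    #include "absl/log/log_sink_registry.h"

    class StderrMirrorSink : public absl::LogSink {
     public:
      void Send(const absl::LogEntry& entry) override {
        // A complete, already-formatted line, including the trailing newline.
        std::cerr << entry.text_message_with_prefix_and_newline();
      }
      void Flush() override { std::cerr.flush(); }
    };

    void InstallMirror() {
      static StderrMirrorSink sink;  // must outlive all logging activity
      absl::AddLogSink(&sink);
    }
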
diff --git a/contrib/restricted/abseil-cpp/absl/log/vlog_is_on.h b/contrib/restricted/abseil-cpp/absl/log/vlog_is_on.h
deleted file mode 100644
index 7898651380..0000000000
--- a/contrib/restricted/abseil-cpp/absl/log/vlog_is_on.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2022 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// File: log/vlog_is_on.h
-// -----------------------------------------------------------------------------
-//
-// This header defines the `VLOG_IS_ON()` macro that controls the
-// variable-verbosity conditional logging.
-//
-// It's used by `VLOG` in log.h, or it can also be used directly like this:
-//
-// if (VLOG_IS_ON(2)) {
-// foo_server.RecomputeStatisticsExpensive();
-// LOG(INFO) << foo_server.LastStatisticsAsString();
-// }
-//
-// Each source file has an effective verbosity level that's a non-negative
-// integer computed from the `--vmodule` and `--v` flags.
-// `VLOG_IS_ON(n)` is true, and `VLOG(n)` logs, if that effective verbosity
-// level is greater than or equal to `n`.
-//
-// `--vmodule` takes a comma-delimited list of key=value pairs. Each key is a
-// pattern matched against filenames, and the values give the effective severity
-// level applied to matching files. '?' and '*' characters in patterns are
-// interpreted as single-character and zero-or-more-character wildcards.
-// Patterns including a slash character are matched against full pathnames,
-// while those without are matched against basenames only. One suffix (i.e. the
-// last . and everything after it) is stripped from each filename prior to
-// matching, as is the special suffix "-inl".
-//
-// Files are matched against globs in `--vmodule` in order, and the first match
-// determines the verbosity level.
-//
-// Files which do not match any pattern in `--vmodule` use the value of `--v` as
-// their effective verbosity level. The default is 0.
-//
-// SetVLOGLevel helper function is provided to do limited dynamic control over
-// V-logging by appending to `--vmodule`. Because these go at the beginning of
-// the list, they take priority over any globs previously added.
-//
-// Resetting --vmodule will override all previous modifications to `--vmodule`,
-// including via SetVLOGLevel.
-
-#ifndef ABSL_LOG_VLOG_IS_ON_H_
-#define ABSL_LOG_VLOG_IS_ON_H_
-
-#include "absl/log/absl_vlog_is_on.h" // IWYU pragma: export
-
-// IWYU pragma: private, include "absl/log/log.h"
-
-// Each VLOG_IS_ON call site gets its own VLogSite that registers with the
-// global linked list of sites to asynchronously update its verbosity level on
-// changes to --v or --vmodule. The verbosity can also be set by manually
-// calling SetVLOGLevel.
-//
-// VLOG_IS_ON is not async signal safe, but it is guaranteed not to allocate
-// new memory.
-#define VLOG_IS_ON(verbose_level) ABSL_VLOG_IS_ON(verbose_level)
-
-#endif // ABSL_LOG_VLOG_IS_ON_H_
diff --git a/contrib/restricted/abseil-cpp/absl/memory/ya.make b/contrib/restricted/abseil-cpp/absl/memory/ya.make
index bdd57c2b35..04e459b47e 100644
--- a/contrib/restricted/abseil-cpp/absl/memory/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/memory/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20240116.2)
+VERSION(20240722.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240116.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240722.0.tar.gz)
PEERDIR(
contrib/restricted/abseil-cpp/absl/meta
diff --git a/contrib/restricted/abseil-cpp/absl/meta/type_traits.h b/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
index cf71164b38..ded55820a3 100644
--- a/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
+++ b/contrib/restricted/abseil-cpp/absl/meta/type_traits.h
@@ -37,11 +37,21 @@
#include <cstddef>
#include <functional>
+#include <string>
#include <type_traits>
+#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#ifdef __cpp_lib_span
+#include <span> // NOLINT(build/c++20)
+#endif
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
// feature.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
@@ -152,8 +162,8 @@ template <typename... Ts>
struct disjunction : std::false_type {};
template <typename T, typename... Ts>
-struct disjunction<T, Ts...> :
- std::conditional<T::value, T, disjunction<Ts...>>::type {};
+struct disjunction<T, Ts...>
+ : std::conditional<T::value, T, disjunction<Ts...>>::type {};
template <typename T>
struct disjunction<T> : T {};
@@ -279,27 +289,6 @@ using remove_extent_t = typename std::remove_extent<T>::type;
template <typename T>
using remove_all_extents_t = typename std::remove_all_extents<T>::type;
-ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
-namespace type_traits_internal {
-// This trick to retrieve a default alignment is necessary for our
-// implementation of aligned_storage_t to be consistent with any
-// implementation of std::aligned_storage.
-template <size_t Len, typename T = std::aligned_storage<Len>>
-struct default_alignment_of_aligned_storage;
-
-template <size_t Len, size_t Align>
-struct default_alignment_of_aligned_storage<
- Len, std::aligned_storage<Len, Align>> {
- static constexpr size_t value = Align;
-};
-} // namespace type_traits_internal
-
-// TODO(b/260219225): std::aligned_storage(_t) is deprecated in C++23.
-template <size_t Len, size_t Align = type_traits_internal::
- default_alignment_of_aligned_storage<Len>::value>
-using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
-ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
-
template <typename T>
using decay_t = typename std::decay<T>::type;
@@ -315,22 +304,23 @@ using common_type_t = typename std::common_type<T...>::type;
template <typename T>
using underlying_type_t = typename std::underlying_type<T>::type;
-
namespace type_traits_internal {
#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
(defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
// std::result_of is deprecated (C++17) or removed (C++20)
-template<typename> struct result_of;
-template<typename F, typename... Args>
+template <typename>
+struct result_of;
+template <typename F, typename... Args>
struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
#else
-template<typename F> using result_of = std::result_of<F>;
+template <typename F>
+using result_of = std::result_of<F>;
#endif
} // namespace type_traits_internal
-template<typename F>
+template <typename F>
using result_of_t = typename type_traits_internal::result_of<F>::type;
namespace type_traits_internal {
@@ -463,20 +453,23 @@ namespace type_traits_internal {
// Make the swap-related traits/function accessible from this namespace.
using swap_internal::IsNothrowSwappable;
using swap_internal::IsSwappable;
-using swap_internal::Swap;
using swap_internal::StdSwapIsUnconstrained;
+using swap_internal::Swap;
} // namespace type_traits_internal
// absl::is_trivially_relocatable<T>
//
// Detects whether a type is known to be "trivially relocatable" -- meaning it
-// can be relocated without invoking the constructor/destructor, using a form of
-// move elision.
+// can be relocated from one place to another as if by memcpy/memmove.
+// This implies that its object representation doesn't depend on its address,
+// and also none of its special member functions do anything strange.
//
-// This trait is conservative, for backwards compatibility. If it's true then
-// the type is definitely trivially relocatable, but if it's false then the type
-// may or may not be.
+// This trait is conservative. If it's true then the type is definitely
+// trivially relocatable, but if it's false then the type may or may not be. For
+// example, std::vector<int> is trivially relocatable on every known STL
+// implementation, but absl::is_trivially_relocatable<std::vector<int>> remains
+// false.
//
// Example:
//
@@ -501,22 +494,34 @@ using swap_internal::StdSwapIsUnconstrained;
//
// TODO(b/275003464): remove the opt-out once the bug is fixed.
//
+// Starting with Xcode 15, the Apple compiler will falsely say a type
+// with a user-provided move constructor is trivially relocatable
+// (b/324278148). We will opt out without a version check, due to
+// the fluidity of Apple versions.
+//
+// TODO(b/324278148): If all versions we use have the bug fixed, then
+// remove the condition.
+//
+// Clang on all platforms fails to detect that a type with a user-provided
+// move-assignment operator is not trivially relocatable. So in fact we
+// opt out of Clang altogether, for now.
+//
+// TODO(b/325479096): Remove the opt-out once Clang's behavior is fixed.
+//
// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
// work with NVCC either.
-#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
- !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \
- !defined(__NVCC__)
+#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
+ (defined(__cpp_impl_trivially_relocatable) || \
+ (!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__)))
template <class T>
struct is_trivially_relocatable
: std::integral_constant<bool, __is_trivially_relocatable(T)> {};
#else
// Otherwise we use a fallback that detects only those types we can feasibly
-// detect. Any type that has trivial move-construction and destruction
-// operations is by definition trivially relocatable.
+// detect. Any type that is trivially copyable is by definition trivially
+// relocatable.
template <class T>
-struct is_trivially_relocatable
- : absl::conjunction<absl::is_trivially_move_constructible<T>,
- absl::is_trivially_destructible<T>> {};
+struct is_trivially_relocatable : std::is_trivially_copyable<T> {};
#endif
// absl::is_constant_evaluated()
@@ -558,6 +563,97 @@ constexpr bool is_constant_evaluated() noexcept {
#endif
}
#endif // ABSL_HAVE_CONSTANT_EVALUATED
+
+namespace type_traits_internal {
+
+// Detects if a class's definition has declared itself to be an owner by
+// declaring
+//   using absl_internal_is_view = std::false_type;
+// as a member.
+// Types that don't want either must either omit this declaration entirely, or
+// (if e.g. inheriting from a base class) define the member to something that
+// isn't a Boolean trait class, such as `void`.
+// Do not specialize or use this directly. It's an implementation detail.
+template <typename T, typename = void>
+struct IsOwnerImpl : std::false_type {
+ static_assert(std::is_same<T, absl::remove_cvref_t<T>>::value,
+ "type must lack qualifiers");
+};
+
+template <typename T>
+struct IsOwnerImpl<
+ T,
+ std::enable_if_t<std::is_class<typename T::absl_internal_is_view>::value>>
+ : absl::negation<typename T::absl_internal_is_view> {};
+
+// A trait to determine whether a type is an owner.
+// Do *not* depend on the correctness of this trait for correct code behavior.
+// It is only a safety feature and its value may change in the future.
+// Do not specialize this; instead, define the member trait inside your type so
+// that it can be auto-detected, and to prevent ODR violations.
+// If it ever becomes possible to detect [[gsl::Owner]], we should leverage it:
+// https://wg21.link/p1179
+template <typename T>
+struct IsOwner : IsOwnerImpl<T> {};
+
+template <typename T, typename Traits, typename Alloc>
+struct IsOwner<std::basic_string<T, Traits, Alloc>> : std::true_type {};
+
+template <typename T, typename Alloc>
+struct IsOwner<std::vector<T, Alloc>> : std::true_type {};
+
+// Detects if a class's definition has declared itself to be a view by declaring
+// using absl_internal_is_view = std::true_type;
+// as a member.
+// Do not specialize or use this directly.
+template <typename T, typename = void>
+struct IsViewImpl : std::false_type {
+ static_assert(std::is_same<T, absl::remove_cvref_t<T>>::value,
+ "type must lack qualifiers");
+};
+
+template <typename T>
+struct IsViewImpl<
+ T,
+ std::enable_if_t<std::is_class<typename T::absl_internal_is_view>::value>>
+ : T::absl_internal_is_view {};
+
+// A trait to determine whether a type is a view.
+// Do *not* depend on the correctness of this trait for correct code behavior.
+// It is only a safety feature, and its value may change in the future.
+// Do not specialize this trait. Instead, define the member
+// using absl_internal_is_view = std::true_type;
+// in your class to allow its detection while preventing ODR violations.
+// If it ever becomes possible to detect [[gsl::Pointer]], we should leverage
+// it: https://wg21.link/p1179
+template <typename T>
+struct IsView : std::integral_constant<bool, std::is_pointer<T>::value ||
+ IsViewImpl<T>::value> {};
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+template <typename Char, typename Traits>
+struct IsView<std::basic_string_view<Char, Traits>> : std::true_type {};
+#endif
+
+#ifdef __cpp_lib_span
+template <typename T>
+struct IsView<std::span<T>> : std::true_type {};
+#endif
+
+// Determines whether the assignment of the given types is lifetime-bound.
+// Do *not* depend on the correctness of this trait for correct code behavior.
+// It is only a safety feature and its value may change in the future.
+// If it ever becomes possible to detect [[clang::lifetimebound]] directly,
+// we should change the implementation to leverage that.
+// Until then, we consider an assignment from an "owner" (such as std::string)
+// to a "view" (such as std::string_view) to be a lifetime-bound assignment.
+template <typename T, typename U>
+using IsLifetimeBoundAssignment =
+ std::integral_constant<bool, IsView<absl::remove_cvref_t<T>>::value &&
+ IsOwner<absl::remove_cvref_t<U>>::value>;
+
+} // namespace type_traits_internal
+
ABSL_NAMESPACE_END
} // namespace absl
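
Two of the additions above benefit from a concrete illustration: `absl::is_trivially_relocatable` gates a memcpy-style relocation path (conservatively, per the comments), and the owner/view member trait feeds the lifetime-bound `StatusOr` overloads later in this diff. A sketch under those stated semantics (`Relocate` and `MyView` are hypothetical; C++17 for `if constexpr`):

    #include <cstring>
    #include <new>
    #include <type_traits>
    #include <utility>
    #include "absl/meta/type_traits.h"

    // Relocate one T from `src` into raw storage at `dst`, taking the memcpy
    // fast path only when the trait vouches for it (false means "unknown").
    template <class T>
    T* Relocate(T* src, void* dst) {
      if constexpr (absl::is_trivially_relocatable<T>::value) {
        std::memcpy(dst, static_cast<void*>(src), sizeof(T));
        return static_cast<T*>(dst);  // *src is no longer treated as an object
      } else {
        T* moved = ::new (dst) T(std::move(*src));
        src->~T();
        return moved;
      }
    }

    // A user-defined view opts into detection (and thus into
    // IsLifetimeBoundAssignment) by declaring the member alias:
    struct MyView {
      using absl_internal_is_view = std::true_type;
      const char* data = nullptr;
    };
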
diff --git a/contrib/restricted/abseil-cpp/absl/meta/ya.make b/contrib/restricted/abseil-cpp/absl/meta/ya.make
index 9d5d65c475..038057a27b 100644
--- a/contrib/restricted/abseil-cpp/absl/meta/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/meta/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20240116.2)
+VERSION(20240722.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240116.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240722.0.tar.gz)
PEERDIR(
contrib/restricted/abseil-cpp/absl/base
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/int128.cc b/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
index daa32b5183..5d6c68d128 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
+++ b/contrib/restricted/abseil-cpp/absl/numeric/int128.cc
@@ -29,9 +29,6 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-ABSL_DLL const uint128 kuint128max = MakeUint128(
- std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max());
-
namespace {
// Returns the 0-based position of the last set bit (i.e., most significant bit)
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/int128.h b/contrib/restricted/abseil-cpp/absl/numeric/int128.h
index 7530a79341..5a067d17b3 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/int128.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/int128.h
@@ -38,6 +38,7 @@
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
+#include "absl/types/compare.h"
#if defined(_MSC_VER)
// In very old versions of MSVC and when the /Zc:wchar_t flag is off, wchar_t is
@@ -244,11 +245,6 @@ class
#endif // byte order
};
-// Prefer to use the constexpr `Uint128Max()`.
-//
-// TODO(absl-team) deprecate kuint128max once migration tool is released.
-ABSL_DLL extern const uint128 kuint128max;
-
// allow uint128 to be logged
std::ostream& operator<<(std::ostream& os, uint128 v);
@@ -274,7 +270,9 @@ class numeric_limits<absl::uint128> {
static constexpr bool has_infinity = false;
static constexpr bool has_quiet_NaN = false;
static constexpr bool has_signaling_NaN = false;
+ ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
static constexpr float_denorm_style has_denorm = denorm_absent;
+ ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
static constexpr bool has_denorm_loss = false;
static constexpr float_round_style round_style = round_toward_zero;
static constexpr bool is_iec559 = false;
@@ -517,7 +515,9 @@ class numeric_limits<absl::int128> {
static constexpr bool has_infinity = false;
static constexpr bool has_quiet_NaN = false;
static constexpr bool has_signaling_NaN = false;
+ ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
static constexpr float_denorm_style has_denorm = denorm_absent;
+ ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
static constexpr bool has_denorm_loss = false;
static constexpr float_round_style round_style = round_toward_zero;
static constexpr bool is_iec559 = false;
@@ -824,6 +824,36 @@ constexpr bool operator<=(uint128 lhs, uint128 rhs) { return !(rhs < lhs); }
constexpr bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
+#ifdef __cpp_impl_three_way_comparison
+constexpr absl::strong_ordering operator<=>(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+ if (auto lhs_128 = static_cast<unsigned __int128>(lhs),
+ rhs_128 = static_cast<unsigned __int128>(rhs);
+ lhs_128 < rhs_128) {
+ return absl::strong_ordering::less;
+ } else if (lhs_128 > rhs_128) {
+ return absl::strong_ordering::greater;
+ } else {
+ return absl::strong_ordering::equal;
+ }
+#else
+ if (uint64_t lhs_high = Uint128High64(lhs), rhs_high = Uint128High64(rhs);
+ lhs_high < rhs_high) {
+ return absl::strong_ordering::less;
+ } else if (lhs_high > rhs_high) {
+ return absl::strong_ordering::greater;
+ } else if (uint64_t lhs_low = Uint128Low64(lhs), rhs_low = Uint128Low64(rhs);
+ lhs_low < rhs_low) {
+ return absl::strong_ordering::less;
+ } else if (lhs_low > rhs_low) {
+ return absl::strong_ordering::greater;
+ } else {
+ return absl::strong_ordering::equal;
+ }
+#endif
+}
+#endif
+
// Unary operators.
constexpr inline uint128 operator+(uint128 val) { return val; }
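
The new `operator<=>` overloads return `absl::strong_ordering`, so 128-bit values participate in C++20 three-way comparison like the built-in integers. A small compile-time sketch, guarded the same way as the header (not part of the diff):

    #include "absl/numeric/int128.h"

    #ifdef __cpp_impl_three_way_comparison
    constexpr absl::uint128 kBig = absl::MakeUint128(1, 0);  // 2^64
    static_assert((kBig <=> absl::uint128{42}) > 0, "2^64 orders above 42");
    static_assert((absl::uint128{7} <=> absl::uint128{7}) == 0, "equal compares equal");
    #endif
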
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/int128_have_intrinsic.inc b/contrib/restricted/abseil-cpp/absl/numeric/int128_have_intrinsic.inc
index 6f1ac64414..51e4b9d485 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/int128_have_intrinsic.inc
+++ b/contrib/restricted/abseil-cpp/absl/numeric/int128_have_intrinsic.inc
@@ -220,6 +220,20 @@ constexpr bool operator>=(int128 lhs, int128 rhs) {
return static_cast<__int128>(lhs) >= static_cast<__int128>(rhs);
}
+#ifdef __cpp_impl_three_way_comparison
+constexpr absl::strong_ordering operator<=>(int128 lhs, int128 rhs) {
+ if (auto lhs_128 = static_cast<__int128>(lhs),
+ rhs_128 = static_cast<__int128>(rhs);
+ lhs_128 < rhs_128) {
+ return absl::strong_ordering::less;
+ } else if (lhs_128 > rhs_128) {
+ return absl::strong_ordering::greater;
+ } else {
+ return absl::strong_ordering::equal;
+ }
+}
+#endif
+
// Unary operators.
constexpr int128 operator-(int128 v) { return -static_cast<__int128>(v); }
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/int128_no_intrinsic.inc b/contrib/restricted/abseil-cpp/absl/numeric/int128_no_intrinsic.inc
index 6f5d83777c..195b7452ea 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/int128_no_intrinsic.inc
+++ b/contrib/restricted/abseil-cpp/absl/numeric/int128_no_intrinsic.inc
@@ -186,6 +186,24 @@ constexpr bool operator<=(int128 lhs, int128 rhs) { return !(lhs > rhs); }
constexpr bool operator>=(int128 lhs, int128 rhs) { return !(lhs < rhs); }
+#ifdef __cpp_impl_three_way_comparison
+constexpr absl::strong_ordering operator<=>(int128 lhs, int128 rhs) {
+ if (int64_t lhs_high = Int128High64(lhs), rhs_high = Int128High64(rhs);
+ lhs_high < rhs_high) {
+ return absl::strong_ordering::less;
+ } else if (lhs_high > rhs_high) {
+ return absl::strong_ordering::greater;
+ } else if (uint64_t lhs_low = Uint128Low64(lhs), rhs_low = Uint128Low64(rhs);
+ lhs_low < rhs_low) {
+ return absl::strong_ordering::less;
+ } else if (lhs_low > rhs_low) {
+ return absl::strong_ordering::greater;
+ } else {
+ return absl::strong_ordering::equal;
+ }
+}
+#endif
+
// Unary operators.
constexpr int128 operator-(int128 v) {
diff --git a/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h b/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
index bfef06bce1..0917464d6a 100644
--- a/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
+++ b/contrib/restricted/abseil-cpp/absl/numeric/internal/bits.h
@@ -167,7 +167,9 @@ CountLeadingZeroes32(uint32_t x) {
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes16(uint16_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_clzs)
+#if ABSL_HAVE_BUILTIN(__builtin_clzg)
+ return x == 0 ? 16 : __builtin_clzg(x);
+#elif ABSL_HAVE_BUILTIN(__builtin_clzs)
static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
"__builtin_clzs does not take 16-bit arg");
return x == 0 ? 16 : __builtin_clzs(x);
@@ -303,7 +305,9 @@ CountTrailingZeroesNonzero64(uint64_t x) {
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero16(uint16_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
+#if ABSL_HAVE_BUILTIN(__builtin_ctzg)
+ return __builtin_ctzg(x);
+#elif ABSL_HAVE_BUILTIN(__builtin_ctzs)
static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
"__builtin_ctzs does not take 16-bit arg");
return __builtin_ctzs(x);
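
The generic `__builtin_clzg`/`__builtin_ctzg` builtins (available in recent GCC and Clang) take the 16-bit operand directly, so they are now preferred over the older `__builtin_clzs`/`__builtin_ctzs`. Observable behavior through the public wrappers in `absl/numeric/bits.h` is unchanged; a sanity sketch (hypothetical `BitScanChecks`, not part of the diff):

    #include <cassert>
    #include <cstdint>
    #include "absl/numeric/bits.h"

    void BitScanChecks() {
      // countl_zero/countr_zero are the public entry points over the internal
      // CountLeadingZeroes16/CountTrailingZeroesNonzero16 shown above.
      uint16_t x = 0x0010;  // only bit 4 set
      assert(absl::countl_zero(x) == 11);
      assert(absl::countr_zero(x) == 4);
    }
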
diff --git a/contrib/restricted/abseil-cpp/absl/profiling/internal/periodic_sampler.h b/contrib/restricted/abseil-cpp/absl/profiling/internal/periodic_sampler.h
index 54f0af452b..f5d847ab25 100644
--- a/contrib/restricted/abseil-cpp/absl/profiling/internal/periodic_sampler.h
+++ b/contrib/restricted/abseil-cpp/absl/profiling/internal/periodic_sampler.h
@@ -172,7 +172,7 @@ inline bool PeriodicSamplerBase::Sample() noexcept {
// Typical use case:
//
// struct HashTablezTag {};
-// thread_local PeriodicSampler sampler;
+// thread_local PeriodicSampler<HashTablezTag, 100> sampler;
//
// void HashTableSamplingLogic(...) {
// if (sampler.Sample()) {
diff --git a/contrib/restricted/abseil-cpp/absl/random/seed_sequences.h b/contrib/restricted/abseil-cpp/absl/random/seed_sequences.h
index c3af4b00a4..33970be581 100644
--- a/contrib/restricted/abseil-cpp/absl/random/seed_sequences.h
+++ b/contrib/restricted/abseil-cpp/absl/random/seed_sequences.h
@@ -29,9 +29,11 @@
#include <random>
#include "absl/base/config.h"
+#include "absl/base/nullability.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
#include "absl/random/seed_gen_exception.h"
+#include "absl/strings/string_view.h"
#include "absl/types/span.h"
namespace absl {
diff --git a/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h b/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
index 5be94903b7..67603156a4 100644
--- a/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/status/internal/statusor_internal.h
@@ -123,11 +123,70 @@ using IsForwardingAssignmentValid = absl::disjunction<
std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>,
IsForwardingAssignmentAmbiguous<T, U>>>>;
+template <bool Value, typename T>
+using Equality = std::conditional_t<Value, T, absl::negation<T>>;
+
+template <bool Explicit, typename T, typename U, bool Lifetimebound>
+using IsConstructionValid = absl::conjunction<
+ Equality<Lifetimebound,
+ type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+ IsDirectInitializationValid<T, U&&>, std::is_constructible<T, U&&>,
+ Equality<!Explicit, std::is_convertible<U&&, T>>,
+ absl::disjunction<
+ std::is_same<T, absl::remove_cvref_t<U>>,
+ absl::conjunction<
+ std::conditional_t<
+ Explicit,
+ absl::negation<std::is_constructible<absl::Status, U&&>>,
+ absl::negation<std::is_convertible<U&&, absl::Status>>>,
+ absl::negation<
+ internal_statusor::HasConversionOperatorToStatusOr<T, U&&>>>>>;
+
+template <typename T, typename U, bool Lifetimebound>
+using IsAssignmentValid = absl::conjunction<
+ Equality<Lifetimebound,
+ type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+ std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
+ absl::disjunction<
+ std::is_same<T, absl::remove_cvref_t<U>>,
+ absl::conjunction<
+ absl::negation<std::is_convertible<U&&, absl::Status>>,
+ absl::negation<HasConversionOperatorToStatusOr<T, U&&>>>>,
+ IsForwardingAssignmentValid<T, U&&>>;
+
+template <bool Explicit, typename T, typename U>
+using IsConstructionFromStatusValid = absl::conjunction<
+ absl::negation<std::is_same<absl::StatusOr<T>, absl::remove_cvref_t<U>>>,
+ absl::negation<std::is_same<T, absl::remove_cvref_t<U>>>,
+ absl::negation<std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>>,
+ Equality<!Explicit, std::is_convertible<U, absl::Status>>,
+ std::is_constructible<absl::Status, U>,
+ absl::negation<HasConversionOperatorToStatusOr<T, U>>>;
+
+template <bool Explicit, typename T, typename U, bool Lifetimebound,
+ typename UQ>
+using IsConstructionFromStatusOrValid = absl::conjunction<
+ absl::negation<std::is_same<T, U>>,
+ Equality<Lifetimebound,
+ type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+ std::is_constructible<T, UQ>,
+ Equality<!Explicit, std::is_convertible<UQ, T>>,
+ absl::negation<IsConstructibleOrConvertibleFromStatusOr<T, U>>>;
+
+template <typename T, typename U, bool Lifetimebound>
+using IsStatusOrAssignmentValid = absl::conjunction<
+ absl::negation<std::is_same<T, absl::remove_cvref_t<U>>>,
+ Equality<Lifetimebound,
+ type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+ std::is_constructible<T, U>, std::is_assignable<T, U>,
+ absl::negation<IsConstructibleOrConvertibleOrAssignableFromStatusOr<
+ T, absl::remove_cvref_t<U>>>>;
+
class Helper {
public:
// Move type-agnostic error handling to the .cc.
static void HandleInvalidStatusCtorArg(absl::Nonnull<Status*>);
- ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status);
+ [[noreturn]] static void Crash(const absl::Status& status);
};
// Construct an instance of T in `p` through placement new, passing Args... to
@@ -379,7 +438,7 @@ struct MoveAssignBase<T, false> {
MoveAssignBase& operator=(MoveAssignBase&&) = delete;
};
-ABSL_ATTRIBUTE_NORETURN void ThrowBadStatusOrAccess(absl::Status status);
+[[noreturn]] void ThrowBadStatusOrAccess(absl::Status status);
// Used to introduce jitter into the output of printing functions for
// `StatusOr` (i.e. `AbslStringify` and `operator<<`).
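
The `Equality` alias is the pivot of these new predicates: `Equality<Flag, Trait>` is satisfied only when `Trait`'s value equals `Flag`, which lets each overload demand that the lifetime-bound condition either hold or not hold. A standalone sketch of the same idiom (a copy for illustration, not the library's own code path):

    #include <type_traits>
    #include "absl/meta/type_traits.h"

    template <bool Value, typename T>
    using Equality = std::conditional_t<Value, T, absl::negation<T>>;

    static_assert(Equality<true, std::true_type>::value, "trait must be true");
    static_assert(Equality<false, std::false_type>::value, "trait must be false");
    static_assert(!Equality<false, std::true_type>::value, "mismatch rejected");
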
diff --git a/contrib/restricted/abseil-cpp/absl/status/status.cc b/contrib/restricted/abseil-cpp/absl/status/status.cc
index 4dd5ae06ce..745ab88922 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status.cc
+++ b/contrib/restricted/abseil-cpp/absl/status/status.cc
@@ -273,14 +273,12 @@ StatusCode ErrnoToStatusCode(int error_number) {
case EFAULT: // Bad address
case EILSEQ: // Illegal byte sequence
case ENOPROTOOPT: // Protocol not available
- case ENOSTR: // Not a STREAM
case ENOTSOCK: // Not a socket
case ENOTTY: // Inappropriate I/O control operation
case EPROTOTYPE: // Protocol wrong type for socket
case ESPIPE: // Invalid seek
return StatusCode::kInvalidArgument;
case ETIMEDOUT: // Connection timed out
- case ETIME: // Timer expired
return StatusCode::kDeadlineExceeded;
case ENODEV: // No such device
case ENOENT: // No such file or directory
@@ -339,9 +337,7 @@ StatusCode ErrnoToStatusCode(int error_number) {
case EMLINK: // Too many links
case ENFILE: // Too many open files in system
case ENOBUFS: // No buffer space available
- case ENODATA: // No message is available on the STREAM read queue
case ENOMEM: // Not enough space
- case ENOSR: // No STREAM resources
#ifdef EUSERS
case EUSERS: // Too many users
#endif
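
The dropped cases are the STREAMS-era error numbers (`ENOSTR`, `ETIME`, `ENODATA`, `ENOSR`), which newer platforms may not define at all. Callers keep using the mapping the same way; a POSIX-flavored sketch (hypothetical `StatFile`, not part of the diff):

    #include <cerrno>
    #include <sys/stat.h>
    #include "absl/status/status.h"

    absl::Status StatFile(const char* path) {
      struct stat st;
      if (stat(path, &st) != 0) {
        // e.g. ENOENT -> kNotFound, EACCES -> kPermissionDenied.
        return absl::Status(absl::ErrnoToStatusCode(errno), "stat() failed");
      }
      return absl::OkStatus();
    }
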
diff --git a/contrib/restricted/abseil-cpp/absl/status/status.h b/contrib/restricted/abseil-cpp/absl/status/status.h
index 9ce16db961..6cfe49f2dd 100644
--- a/contrib/restricted/abseil-cpp/absl/status/status.h
+++ b/contrib/restricted/abseil-cpp/absl/status/status.h
@@ -452,7 +452,7 @@ class ABSL_ATTRIBUTE_TRIVIAL_ABI Status final {
// The moved-from state is valid but unspecified.
Status(Status&&) noexcept;
- Status& operator=(Status&&);
+ Status& operator=(Status&&) noexcept;
~Status();
@@ -539,7 +539,7 @@ class ABSL_ATTRIBUTE_TRIVIAL_ABI Status final {
// swap()
//
// Swap the contents of one status with another.
- friend void swap(Status& a, Status& b);
+ friend void swap(Status& a, Status& b) noexcept;
//----------------------------------------------------------------------------
// Payload Management APIs
@@ -789,7 +789,7 @@ inline Status::Status(Status&& x) noexcept : Status(x.rep_) {
x.rep_ = MovedFromRep();
}
-inline Status& Status::operator=(Status&& x) {
+inline Status& Status::operator=(Status&& x) noexcept {
uintptr_t old_rep = rep_;
if (x.rep_ != old_rep) {
rep_ = x.rep_;
@@ -852,7 +852,7 @@ inline void Status::IgnoreError() const {
// no-op
}
-inline void swap(absl::Status& a, absl::Status& b) {
+inline void swap(absl::Status& a, absl::Status& b) noexcept {
using std::swap;
swap(a.rep_, b.rep_);
}
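
Marking move-assignment and `swap` as `noexcept` is not cosmetic: containers such as `std::vector` will now move `absl::Status` elements during reallocation instead of copying them. What the new signatures guarantee, as compile-time checks (C++17 for `is_nothrow_swappable`; not part of the diff):

    #include <type_traits>
    #include "absl/status/status.h"

    static_assert(std::is_nothrow_move_constructible<absl::Status>::value, "");
    static_assert(std::is_nothrow_move_assignable<absl::Status>::value, "");
    static_assert(std::is_nothrow_swappable<absl::Status>::value, "");
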
diff --git a/contrib/restricted/abseil-cpp/absl/status/statusor.h b/contrib/restricted/abseil-cpp/absl/status/statusor.h
index cd35e5b436..b1da45e6f0 100644
--- a/contrib/restricted/abseil-cpp/absl/status/statusor.h
+++ b/contrib/restricted/abseil-cpp/absl/status/statusor.h
@@ -236,57 +236,55 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// is explicit if and only if the corresponding construction of `T` from `U`
// is explicit. (This constructor inherits its explicitness from the
// underlying constructor.)
- template <
- typename U,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>,
- std::is_constructible<T, const U&>,
- std::is_convertible<const U&, T>,
- absl::negation<
- internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
- T, U>>>::value,
- int> = 0>
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ false, T, U, false, const U&>::value,
+ int> = 0>
StatusOr(const StatusOr<U>& other) // NOLINT
: Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
- template <
- typename U,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>,
- std::is_constructible<T, const U&>,
- absl::negation<std::is_convertible<const U&, T>>,
- absl::negation<
- internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
- T, U>>>::value,
- int> = 0>
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ false, T, U, true, const U&>::value,
+ int> = 0>
+ StatusOr(const StatusOr<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND) // NOLINT
+ : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ true, T, U, false, const U&>::value,
+ int> = 0>
explicit StatusOr(const StatusOr<U>& other)
: Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ true, T, U, true, const U&>::value,
+ int> = 0>
+ explicit StatusOr(const StatusOr<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
- template <
- typename U,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
- std::is_convertible<U&&, T>,
- absl::negation<
- internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
- T, U>>>::value,
- int> = 0>
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ false, T, U, false, U&&>::value,
+ int> = 0>
StatusOr(StatusOr<U>&& other) // NOLINT
: Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
- template <
- typename U,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
- absl::negation<std::is_convertible<U&&, T>>,
- absl::negation<
- internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
- T, U>>>::value,
- int> = 0>
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ false, T, U, true, U&&>::value,
+ int> = 0>
+ StatusOr(StatusOr<U>&& other ABSL_ATTRIBUTE_LIFETIME_BOUND) // NOLINT
+ : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ true, T, U, false, U&&>::value,
+ int> = 0>
explicit StatusOr(StatusOr<U>&& other)
: Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+ template <typename U, absl::enable_if_t<
+ internal_statusor::IsConstructionFromStatusOrValid<
+ true, T, U, true, U&&>::value,
+ int> = 0>
+ explicit StatusOr(StatusOr<U>&& other ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
// Converting Assignment Operators
@@ -307,37 +305,38 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// These overloads only apply if `absl::StatusOr<T>` is constructible and
// assignable from `absl::StatusOr<U>` and `StatusOr<T>` cannot be directly
// assigned from `StatusOr<U>`.
- template <
- typename U,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>,
- std::is_constructible<T, const U&>,
- std::is_assignable<T, const U&>,
- absl::negation<
- internal_statusor::
- IsConstructibleOrConvertibleOrAssignableFromStatusOr<
- T, U>>>::value,
- int> = 0>
+ template <typename U,
+ absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+ T, const U&, false>::value,
+ int> = 0>
StatusOr& operator=(const StatusOr<U>& other) {
this->Assign(other);
return *this;
}
- template <
- typename U,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
- std::is_assignable<T, U&&>,
- absl::negation<
- internal_statusor::
- IsConstructibleOrConvertibleOrAssignableFromStatusOr<
- T, U>>>::value,
- int> = 0>
+ template <typename U,
+ absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+ T, const U&, true>::value,
+ int> = 0>
+ StatusOr& operator=(const StatusOr<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND) {
+ this->Assign(other);
+ return *this;
+ }
+ template <typename U,
+ absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+ T, U&&, false>::value,
+ int> = 0>
StatusOr& operator=(StatusOr<U>&& other) {
this->Assign(std::move(other));
return *this;
}
+ template <typename U,
+ absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+ T, U&&, true>::value,
+ int> = 0>
+ StatusOr& operator=(StatusOr<U>&& other ABSL_ATTRIBUTE_LIFETIME_BOUND) {
+ this->Assign(std::move(other));
+ return *this;
+ }
// Constructs a new `absl::StatusOr<T>` with a non-ok status. After calling
// this constructor, `this->ok()` will be `false` and calls to `value()` will
@@ -350,46 +349,21 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
// In optimized builds, passing absl::OkStatus() here will have the effect
// of passing absl::StatusCode::kInternal as a fallback.
- template <
- typename U = absl::Status,
- absl::enable_if_t<
- absl::conjunction<
- std::is_convertible<U&&, absl::Status>,
- std::is_constructible<absl::Status, U&&>,
- absl::negation<std::is_same<absl::decay_t<U>, absl::StatusOr<T>>>,
- absl::negation<std::is_same<absl::decay_t<U>, T>>,
- absl::negation<std::is_same<absl::decay_t<U>, absl::in_place_t>>,
- absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
- T, U&&>>>::value,
- int> = 0>
+ template <typename U = absl::Status,
+ absl::enable_if_t<internal_statusor::IsConstructionFromStatusValid<
+ false, T, U>::value,
+ int> = 0>
StatusOr(U&& v) : Base(std::forward<U>(v)) {}
- template <
- typename U = absl::Status,
- absl::enable_if_t<
- absl::conjunction<
- absl::negation<std::is_convertible<U&&, absl::Status>>,
- std::is_constructible<absl::Status, U&&>,
- absl::negation<std::is_same<absl::decay_t<U>, absl::StatusOr<T>>>,
- absl::negation<std::is_same<absl::decay_t<U>, T>>,
- absl::negation<std::is_same<absl::decay_t<U>, absl::in_place_t>>,
- absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
- T, U&&>>>::value,
- int> = 0>
+ template <typename U = absl::Status,
+ absl::enable_if_t<internal_statusor::IsConstructionFromStatusValid<
+ true, T, U>::value,
+ int> = 0>
explicit StatusOr(U&& v) : Base(std::forward<U>(v)) {}
-
- template <
- typename U = absl::Status,
- absl::enable_if_t<
- absl::conjunction<
- std::is_convertible<U&&, absl::Status>,
- std::is_constructible<absl::Status, U&&>,
- absl::negation<std::is_same<absl::decay_t<U>, absl::StatusOr<T>>>,
- absl::negation<std::is_same<absl::decay_t<U>, T>>,
- absl::negation<std::is_same<absl::decay_t<U>, absl::in_place_t>>,
- absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
- T, U&&>>>::value,
- int> = 0>
+ template <typename U = absl::Status,
+ absl::enable_if_t<internal_statusor::IsConstructionFromStatusValid<
+ false, T, U>::value,
+ int> = 0>
StatusOr& operator=(U&& v) {
this->AssignStatus(std::forward<U>(v));
return *this;
@@ -411,21 +385,22 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// StatusOr<bool> s1 = true; // s1.ok() && *s1 == true
// StatusOr<bool> s2 = false; // s2.ok() && *s2 == false
// s1 = s2; // ambiguous, `s1 = *s2` or `s1 = bool(s2)`?
- template <
- typename U = T,
- typename = typename std::enable_if<absl::conjunction<
- std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
- absl::disjunction<
- std::is_same<absl::remove_cvref_t<U>, T>,
- absl::conjunction<
- absl::negation<std::is_convertible<U&&, absl::Status>>,
- absl::negation<internal_statusor::
- HasConversionOperatorToStatusOr<T, U&&>>>>,
- internal_statusor::IsForwardingAssignmentValid<T, U&&>>::value>::type>
+ template <typename U = T,
+ typename std::enable_if<
+ internal_statusor::IsAssignmentValid<T, U, false>::value,
+ int>::type = 0>
StatusOr& operator=(U&& v) {
this->Assign(std::forward<U>(v));
return *this;
}
+ template <typename U = T,
+ typename std::enable_if<
+ internal_statusor::IsAssignmentValid<T, U, true>::value,
+ int>::type = 0>
+ StatusOr& operator=(U&& v ABSL_ATTRIBUTE_LIFETIME_BOUND) {
+ this->Assign(std::forward<U>(v));
+ return *this;
+ }
// Constructs the inner value `T` in-place using the provided args, using the
// `T(args...)` constructor.
@@ -442,40 +417,31 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// This constructor is explicit if `U` is not convertible to `T`. To avoid
// ambiguity, this constructor is disabled if `U` is a `StatusOr<J>`, where
// `J` is convertible to `T`.
- template <
- typename U = T,
- absl::enable_if_t<
- absl::conjunction<
- internal_statusor::IsDirectInitializationValid<T, U&&>,
- std::is_constructible<T, U&&>, std::is_convertible<U&&, T>,
- absl::disjunction<
- std::is_same<absl::remove_cvref_t<U>, T>,
- absl::conjunction<
- absl::negation<std::is_convertible<U&&, absl::Status>>,
- absl::negation<
- internal_statusor::HasConversionOperatorToStatusOr<
- T, U&&>>>>>::value,
- int> = 0>
+ template <typename U = T,
+ absl::enable_if_t<internal_statusor::IsConstructionValid<
+ false, T, U, false>::value,
+ int> = 0>
StatusOr(U&& u) // NOLINT
: StatusOr(absl::in_place, std::forward<U>(u)) {}
+ template <typename U = T,
+ absl::enable_if_t<internal_statusor::IsConstructionValid<
+ false, T, U, true>::value,
+ int> = 0>
+ StatusOr(U&& u ABSL_ATTRIBUTE_LIFETIME_BOUND) // NOLINT
+ : StatusOr(absl::in_place, std::forward<U>(u)) {}
- template <
- typename U = T,
- absl::enable_if_t<
- absl::conjunction<
- internal_statusor::IsDirectInitializationValid<T, U&&>,
- absl::disjunction<
- std::is_same<absl::remove_cvref_t<U>, T>,
- absl::conjunction<
- absl::negation<std::is_constructible<absl::Status, U&&>>,
- absl::negation<
- internal_statusor::HasConversionOperatorToStatusOr<
- T, U&&>>>>,
- std::is_constructible<T, U&&>,
- absl::negation<std::is_convertible<U&&, T>>>::value,
- int> = 0>
+ template <typename U = T,
+ absl::enable_if_t<internal_statusor::IsConstructionValid<
+ true, T, U, false>::value,
+ int> = 0>
explicit StatusOr(U&& u) // NOLINT
: StatusOr(absl::in_place, std::forward<U>(u)) {}
+ template <typename U = T,
+ absl::enable_if_t<
+ internal_statusor::IsConstructionValid<true, T, U, true>::value,
+ int> = 0>
+ explicit StatusOr(U&& u ABSL_ATTRIBUTE_LIFETIME_BOUND) // NOLINT
+ : StatusOr(absl::in_place, std::forward<U>(u)) {}
// StatusOr<T>::ok()
//
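
The doubling of every converting constructor and assignment operator exists so that, when `IsLifetimeBoundAssignment` holds (a view `T` initialized from an owner `U`), the selected overload carries `ABSL_ATTRIBUTE_LIFETIME_BOUND` and Clang can flag dangling views. A sketch of the pattern the annotation is meant to catch (hypothetical `Dangling`; diagnosed only where the attribute is honored):

    #include <string>
    #include "absl/status/statusor.h"
    #include "absl/strings/string_view.h"

    absl::StatusOr<absl::string_view> Dangling() {
      std::string owner = "short-lived";
      // string_view is a "view" and std::string an "owner", so the
      // lifetime-bound overload is chosen and Clang can warn that the
      // returned view outlives `owner`.
      return owner;
    }
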
diff --git a/contrib/restricted/abseil-cpp/absl/strings/ascii.cc b/contrib/restricted/abseil-cpp/absl/strings/ascii.cc
index 5460b2c6dc..20a696a1f6 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/ascii.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/ascii.cc
@@ -15,13 +15,14 @@
#include "absl/strings/ascii.h"
#include <climits>
-#include <cstdint>
+#include <cstddef>
#include <cstring>
#include <string>
-#include <type_traits>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -162,19 +163,6 @@ ABSL_DLL const char kToUpper[256] = {
};
// clang-format on
-template <class T>
-static constexpr T BroadcastByte(unsigned char value) {
- static_assert(std::is_integral<T>::value && sizeof(T) <= sizeof(uint64_t) &&
- std::is_unsigned<T>::value,
- "only unsigned integers up to 64-bit allowed");
- T result = value;
- constexpr size_t result_bit_width = sizeof(result) * CHAR_BIT;
- result |= result << ((CHAR_BIT << 0) & (result_bit_width - 1));
- result |= result << ((CHAR_BIT << 1) & (result_bit_width - 1));
- result |= result << ((CHAR_BIT << 2) & (result_bit_width - 1));
- return result;
-}
-
// Returns whether `c` is in the a-z/A-Z range (w.r.t. `ToUpper`).
// Implemented by:
// 1. Pushing the a-z/A-Z range to [SCHAR_MIN, SCHAR_MIN + 26).
@@ -189,47 +177,10 @@ constexpr bool AsciiInAZRange(unsigned char c) {
return static_cast<signed char>(u) < threshold;
}
+// Force-inline so the compiler won't merge the short and long implementations.
template <bool ToUpper>
-static constexpr char* PartialAsciiStrCaseFold(absl::Nonnull<char*> p,
- absl::Nonnull<char*> end) {
- using vec_t = size_t;
- const size_t n = static_cast<size_t>(end - p);
-
- // SWAR algorithm: http://0x80.pl/notesen/2016-01-06-swar-swap-case.html
- constexpr char ch_a = ToUpper ? 'a' : 'A', ch_z = ToUpper ? 'z' : 'Z';
- char* const swar_end = p + (n / sizeof(vec_t)) * sizeof(vec_t);
- while (p < swar_end) {
- vec_t v = vec_t();
-
- // memcpy the vector, but constexpr
- for (size_t i = 0; i < sizeof(vec_t); ++i) {
- v |= static_cast<vec_t>(static_cast<unsigned char>(p[i]))
- << (i * CHAR_BIT);
- }
-
- constexpr unsigned int msb = 1u << (CHAR_BIT - 1);
- const vec_t v_msb = v & BroadcastByte<vec_t>(msb);
- const vec_t v_nonascii_mask = (v_msb << 1) - (v_msb >> (CHAR_BIT - 1));
- const vec_t v_nonascii = v & v_nonascii_mask;
- const vec_t v_ascii = v & ~v_nonascii_mask;
- const vec_t a = v_ascii + BroadcastByte<vec_t>(msb - ch_a - 0),
- z = v_ascii + BroadcastByte<vec_t>(msb - ch_z - 1);
- v = v_nonascii | (v_ascii ^ ((a ^ z) & BroadcastByte<vec_t>(msb)) >> 2);
-
- // memcpy the vector, but constexpr
- for (size_t i = 0; i < sizeof(vec_t); ++i) {
- p[i] = static_cast<char>(v >> (i * CHAR_BIT));
- }
-
- p += sizeof(v);
- }
-
- return p;
-}
-
-template <bool ToUpper>
-static constexpr void AsciiStrCaseFold(absl::Nonnull<char*> p,
- absl::Nonnull<char*> end) {
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline constexpr void AsciiStrCaseFoldImpl(
+ absl::Nonnull<char*> p, size_t size) {
// The upper- and lowercase versions of ASCII characters differ by only 1 bit.
// When we need to flip the case, we can xor with this bit to achieve the
// desired result. Note that the choice of 'a' and 'A' here is arbitrary. We
@@ -237,20 +188,32 @@ static constexpr void AsciiStrCaseFold(absl::Nonnull<char*> p,
// have the same single bit difference.
constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';
- using vec_t = size_t;
- // TODO(b/316380338): When FDO becomes able to vectorize these,
- // revert this manual optimization and just leave the naive loop.
- if (static_cast<size_t>(end - p) >= sizeof(vec_t)) {
- p = ascii_internal::PartialAsciiStrCaseFold<ToUpper>(p, end);
- }
- while (p < end) {
- unsigned char v = static_cast<unsigned char>(*p);
+ for (size_t i = 0; i < size; ++i) {
+ unsigned char v = static_cast<unsigned char>(p[i]);
v ^= AsciiInAZRange<ToUpper>(v) ? kAsciiCaseBitFlip : 0;
- *p = static_cast<char>(v);
- ++p;
+ p[i] = static_cast<char>(v);
}
}
+// The string size threshold at which we switch to the long-string version.
+constexpr size_t kCaseFoldThreshold = 16;
+
+// No-inline so the compiler won't merge the short and long implementations.
+template <bool ToUpper>
+ABSL_ATTRIBUTE_NOINLINE constexpr void AsciiStrCaseFoldLong(
+ absl::Nonnull<char*> p, size_t size) {
+ ABSL_ASSUME(size >= kCaseFoldThreshold);
+ AsciiStrCaseFoldImpl<ToUpper>(p, size);
+}
+
+// Split into short- and long-string paths so that vectorization decisions
+// can be made separately for each case.
+template <bool ToUpper>
+constexpr void AsciiStrCaseFold(absl::Nonnull<char*> p, size_t size) {
+ size < kCaseFoldThreshold ? AsciiStrCaseFoldImpl<ToUpper>(p, size)
+ : AsciiStrCaseFoldLong<ToUpper>(p, size);
+}
+
static constexpr size_t ValidateAsciiCasefold() {
constexpr size_t num_chars = 1 + CHAR_MAX - CHAR_MIN;
size_t incorrect_index = 0;
@@ -259,8 +222,8 @@ static constexpr size_t ValidateAsciiCasefold() {
for (unsigned int i = 0; i < num_chars; ++i) {
uppered[i] = lowered[i] = static_cast<char>(i);
}
- AsciiStrCaseFold<false>(&lowered[0], &lowered[num_chars]);
- AsciiStrCaseFold<true>(&uppered[0], &uppered[num_chars]);
+ AsciiStrCaseFold<false>(&lowered[0], num_chars);
+ AsciiStrCaseFold<true>(&uppered[0], num_chars);
for (size_t i = 0; i < num_chars; ++i) {
const char ch = static_cast<char>(i),
ch_upper = ('a' <= ch && ch <= 'z' ? 'A' + (ch - 'a') : ch),
@@ -278,13 +241,11 @@ static_assert(ValidateAsciiCasefold() == 0, "error in case conversion");
} // namespace ascii_internal
void AsciiStrToLower(absl::Nonnull<std::string*> s) {
- char* p = &(*s)[0]; // Guaranteed to be valid for empty strings
- return ascii_internal::AsciiStrCaseFold<false>(p, p + s->size());
+ return ascii_internal::AsciiStrCaseFold<false>(&(*s)[0], s->size());
}
void AsciiStrToUpper(absl::Nonnull<std::string*> s) {
- char* p = &(*s)[0]; // Guaranteed to be valid for empty strings
- return ascii_internal::AsciiStrCaseFold<true>(p, p + s->size());
+ return ascii_internal::AsciiStrCaseFold<true>(&(*s)[0], s->size());
}
void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str) {
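
The rewrite leans on the fact that upper- and lowercase ASCII letters differ in exactly one bit (`'a' ^ 'A' == 0x20`), and splits short from long inputs so each can be tuned independently. A usage sketch over the public entry points (hypothetical `CaseFoldDemo`, not part of the diff):

    #include <cassert>
    #include <string>
    #include "absl/strings/ascii.h"

    void CaseFoldDemo() {
      std::string s = "MiXeD 123!";
      absl::AsciiStrToLower(&s);  // dispatches to the short path (size < 16)
      assert(s == "mixed 123!");
      static_assert(('a' ^ 'A') == 0x20, "the single ASCII case bit");
    }
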
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord.cc b/contrib/restricted/abseil-cpp/absl/strings/cord.cc
index f67326fdef..f0f4f31ac5 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord.cc
@@ -75,7 +75,7 @@ using ::absl::cord_internal::kMinFlatLength;
using ::absl::cord_internal::kInlinedVectorSize;
using ::absl::cord_internal::kMaxBytesToCopy;
-static void DumpNode(absl::Nonnull<CordRep*> rep, bool include_data,
+static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
absl::Nonnull<std::ostream*> os, int indent = 0);
static bool VerifyNode(absl::Nonnull<CordRep*> root,
absl::Nonnull<CordRep*> start_node);
@@ -425,8 +425,8 @@ Cord& Cord::operator=(absl::string_view src) {
// we keep it here to make diffs easier.
void Cord::InlineRep::AppendArray(absl::string_view src,
MethodIdentifier method) {
- MaybeRemoveEmptyCrcNode();
if (src.empty()) return; // memcpy(_, nullptr, 0) is undefined.
+ MaybeRemoveEmptyCrcNode();
size_t appended = 0;
CordRep* rep = tree();
@@ -1062,6 +1062,15 @@ void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
}
}
+void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
+ const size_t cur_dst_size = dst->size();
+ const size_t new_dst_size = cur_dst_size + src.size();
+ absl::strings_internal::STLStringResizeUninitializedAmortized(dst,
+ new_dst_size);
+ char* append_ptr = &(*dst)[cur_dst_size];
+ src.CopyToArrayImpl(append_ptr);
+}
+
void Cord::CopyToArraySlowPath(absl::Nonnull<char*> dst) const {
assert(contents_.is_tree());
absl::string_view fragment;
@@ -1448,14 +1457,13 @@ absl::string_view Cord::FlattenSlowPath() {
}
}
-static void DumpNode(absl::Nonnull<CordRep*> rep, bool include_data,
+static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
absl::Nonnull<std::ostream*> os, int indent) {
+ CordRep* rep = nonnull_rep;
const int kIndentStep = 1;
- absl::InlinedVector<CordRep*, kInlinedVectorSize> stack;
- absl::InlinedVector<int, kInlinedVectorSize> indents;
for (;;) {
- *os << std::setw(3) << rep->refcount.Get();
- *os << " " << std::setw(7) << rep->length;
+ *os << std::setw(3) << (rep == nullptr ? 0 : rep->refcount.Get());
+ *os << " " << std::setw(7) << (rep == nullptr ? 0 : rep->length);
*os << " [";
if (include_data) *os << static_cast<void*>(rep);
*os << "]";
@@ -1477,26 +1485,23 @@ static void DumpNode(absl::Nonnull<CordRep*> rep, bool include_data,
if (rep->IsExternal()) {
*os << "EXTERNAL [";
if (include_data)
- *os << absl::CEscape(std::string(rep->external()->base, rep->length));
+ *os << absl::CEscape(
+ absl::string_view(rep->external()->base, rep->length));
*os << "]\n";
} else if (rep->IsFlat()) {
*os << "FLAT cap=" << rep->flat()->Capacity() << " [";
if (include_data)
- *os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
+ *os << absl::CEscape(
+ absl::string_view(rep->flat()->Data(), rep->length));
*os << "]\n";
} else {
CordRepBtree::Dump(rep, /*label=*/"", include_data, *os);
}
}
if (leaf) {
- if (stack.empty()) break;
- rep = stack.back();
- stack.pop_back();
- indent = indents.back();
- indents.pop_back();
+ break;
}
}
- ABSL_INTERNAL_CHECK(indents.empty(), "");
}
static std::string ReportError(absl::Nonnull<CordRep*> root,
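
`AppendCordToString` (declared in the `cord.h` hunk below) performs one amortized resize and then a single copy pass, preserving whatever `*dst` already holds. A usage sketch (hypothetical `AppendDemo`, not part of the diff):

    #include <string>
    #include "absl/strings/cord.h"

    void AppendDemo() {
      std::string out = "header: ";
      absl::Cord payload("body");
      // Unlike CopyCordToString, the existing prefix of `out` is kept.
      absl::AppendCordToString(payload, &out);
      // out == "header: body"
    }
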
diff --git a/contrib/restricted/abseil-cpp/absl/strings/cord.h b/contrib/restricted/abseil-cpp/absl/strings/cord.h
index b3e556b6b4..69aa8ef41f 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/cord.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/cord.h
@@ -75,6 +75,7 @@
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/inlined_vector.h"
#include "absl/crc/internal/crc_cord_state.h"
@@ -95,6 +96,7 @@
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/internal/string_constant.h"
#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
#include "absl/types/optional.h"
namespace absl {
@@ -104,6 +106,7 @@ class CordTestPeer;
template <typename Releaser>
Cord MakeCordFromExternal(absl::string_view, Releaser&&);
void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
+void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
// Cord memory accounting modes
enum class CordMemoryAccounting {
@@ -420,6 +423,18 @@ class Cord {
friend void CopyCordToString(const Cord& src,
absl::Nonnull<std::string*> dst);
+ // AppendCordToString()
+ //
+ // Appends the contents of a `src` Cord to a `*dst` string.
+ //
+ // This function optimizes the case of appending to a non-empty destination
+ // string. If `*dst` already has capacity to store the contents of the cord,
+ // this function does not invalidate pointers previously returned by
+ // `dst->data()`. If `*dst` is a new object, prefer to simply use the
+ // conversion operator to `std::string`.
+ friend void AppendCordToString(const Cord& src,
+ absl::Nonnull<std::string*> dst);
+
class CharIterator;
//----------------------------------------------------------------------------
@@ -757,7 +772,7 @@ class Cord {
// Cord::Find()
//
- // Returns an iterator to the first occurrance of the substring `needle`.
+ // Returns an iterator to the first occurrence of the substring `needle`.
//
// If the substring `needle` does not occur, `Cord::char_end()` is returned.
CharIterator Find(absl::string_view needle) const;
@@ -835,6 +850,38 @@ class Cord {
friend bool operator==(const Cord& lhs, const Cord& rhs);
friend bool operator==(const Cord& lhs, absl::string_view rhs);
+#ifdef __cpp_impl_three_way_comparison
+
+ // Cords support comparison with other Cords and string_views via operator<
+ // and others; here we provide a wrapper for the C++20 three-way comparison
+ // <=> operator.
+
+ static inline std::strong_ordering ConvertCompareResultToStrongOrdering(
+ int c) {
+ if (c == 0) {
+ return std::strong_ordering::equal;
+ } else if (c < 0) {
+ return std::strong_ordering::less;
+ } else {
+ return std::strong_ordering::greater;
+ }
+ }
+
+ friend inline std::strong_ordering operator<=>(const Cord& x, const Cord& y) {
+ return ConvertCompareResultToStrongOrdering(x.Compare(y));
+ }
+
+ friend inline std::strong_ordering operator<=>(const Cord& lhs,
+ absl::string_view rhs) {
+ return ConvertCompareResultToStrongOrdering(lhs.Compare(rhs));
+ }
+
+ friend inline std::strong_ordering operator<=>(absl::string_view lhs,
+ const Cord& rhs) {
+ return ConvertCompareResultToStrongOrdering(-rhs.Compare(lhs));
+ }
+#endif
+
friend absl::Nullable<const CordzInfo*> GetCordzInfoForTesting(
const Cord& cord);
@@ -1065,6 +1112,8 @@ class Cord {
const;
CharIterator FindImpl(CharIterator it, absl::string_view needle) const;
+
+ void CopyToArrayImpl(absl::Nonnull<char*> dst) const;
};
ABSL_NAMESPACE_END
@@ -1103,8 +1152,8 @@ absl::Nonnull<CordRep*> NewExternalRep(absl::string_view data,
// Overload for function reference types that dispatches using a function
// pointer because there are no `alignof()` or `sizeof()` a function reference.
// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
-inline absl::Nonnull<CordRep*> NewExternalRep(absl::string_view data,
- void (&releaser)(absl::string_view)) {
+inline absl::Nonnull<CordRep*> NewExternalRep(
+ absl::string_view data, void (&releaser)(absl::string_view)) {
return NewExternalRep(data, &releaser);
}
@@ -1120,7 +1169,7 @@ Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) {
} else {
using ReleaserType = absl::decay_t<Releaser>;
cord_internal::InvokeReleaser(
- cord_internal::Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
+ cord_internal::Rank1{}, ReleaserType(std::forward<Releaser>(releaser)),
data);
}
return cord;
@@ -1170,7 +1219,8 @@ inline void Cord::InlineRep::Swap(absl::Nonnull<Cord::InlineRep*> rhs) {
if (rhs == this) {
return;
}
- std::swap(data_, rhs->data_);
+ using std::swap;
+ swap(data_, rhs->data_);
}
inline absl::Nullable<const char*> Cord::InlineRep::data() const {
@@ -1352,7 +1402,8 @@ inline size_t Cord::EstimatedMemoryUsage(
return result;
}
-inline absl::optional<absl::string_view> Cord::TryFlat() const {
+inline absl::optional<absl::string_view> Cord::TryFlat() const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
absl::cord_internal::CordRep* rep = contents_.tree();
if (rep == nullptr) {
return absl::string_view(contents_.data(), contents_.size());
@@ -1364,7 +1415,7 @@ inline absl::optional<absl::string_view> Cord::TryFlat() const {
return absl::nullopt;
}
-inline absl::string_view Cord::Flatten() {
+inline absl::string_view Cord::Flatten() ABSL_ATTRIBUTE_LIFETIME_BOUND {
absl::cord_internal::CordRep* rep = contents_.tree();
if (rep == nullptr) {
return absl::string_view(contents_.data(), contents_.size());
@@ -1387,6 +1438,7 @@ inline void Cord::Prepend(absl::string_view src) {
inline void Cord::Append(CordBuffer buffer) {
if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+ contents_.MaybeRemoveEmptyCrcNode();
absl::string_view short_value;
if (CordRep* rep = buffer.ConsumeValue(short_value)) {
contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer);
@@ -1397,6 +1449,7 @@ inline void Cord::Append(CordBuffer buffer) {
inline void Cord::Prepend(CordBuffer buffer) {
if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+ contents_.MaybeRemoveEmptyCrcNode();
absl::string_view short_value;
if (CordRep* rep = buffer.ConsumeValue(short_value)) {
contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer);
@@ -1445,6 +1498,14 @@ inline bool Cord::StartsWith(absl::string_view rhs) const {
return EqualsImpl(rhs, rhs_size);
}
+inline void Cord::CopyToArrayImpl(absl::Nonnull<char*> dst) const {
+ if (!contents_.is_tree()) {
+ if (!empty()) contents_.CopyToArray(dst);
+ } else {
+ CopyToArraySlowPath(dst);
+ }
+}
+
inline void Cord::ChunkIterator::InitTree(
absl::Nonnull<cord_internal::CordRep*> tree) {
tree = cord_internal::SkipCrcNode(tree);
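
`Cord`'s new `operator<=>` is a thin wrapper that maps the existing three-way `Compare()` result onto `std::strong_ordering`, in both homogeneous and `string_view` forms. A sketch guarded like the header (hypothetical `SpaceshipDemo`, not part of the diff):

    #include "absl/strings/cord.h"
    #include "absl/strings/string_view.h"

    #ifdef __cpp_impl_three_way_comparison
    bool SpaceshipDemo() {
      absl::Cord a("apple"), b("banana");
      // Heterogeneous comparison with string_view works in both directions.
      return (a <=> b) < 0 && (a <=> absl::string_view("apple")) == 0;
    }
    #endif
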
diff --git a/contrib/restricted/abseil-cpp/absl/strings/escaping.cc b/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
index 27d3d98c28..4ffef94b63 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/escaping.cc
@@ -21,10 +21,12 @@
#include <cstring>
#include <limits>
#include <string>
+#include <utility>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/nullability.h"
#include "absl/strings/ascii.h"
#include "absl/strings/charset.h"
#include "absl/strings/internal/escaping.h"
@@ -54,7 +56,8 @@ inline unsigned int hex_digit_to_int(char c) {
return x & 0xf;
}
-inline bool IsSurrogate(char32_t c, absl::string_view src, std::string* error) {
+inline bool IsSurrogate(char32_t c, absl::string_view src,
+ absl::Nullable<std::string*> error) {
if (c >= 0xD800 && c <= 0xDFFF) {
if (error) {
*error = absl::StrCat("invalid surrogate character (0xD800-DFFF): \\",
@@ -83,7 +86,9 @@ inline bool IsSurrogate(char32_t c, absl::string_view src, std::string* error) {
// UnescapeCEscapeSequences().
// ----------------------------------------------------------------------
bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
- char* dest, ptrdiff_t* dest_len, std::string* error) {
+ absl::Nonnull<char*> dest,
+ absl::Nonnull<ptrdiff_t*> dest_len,
+ absl::Nullable<std::string*> error) {
char* d = dest;
const char* p = source.data();
const char* end = p + source.size();
@@ -290,7 +295,8 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
// may be the same.
// ----------------------------------------------------------------------
bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
- std::string* dest, std::string* error) {
+ absl::Nonnull<std::string*> dest,
+ absl::Nullable<std::string*> error) {
strings_internal::STLStringResizeUninitialized(dest, source.size());
ptrdiff_t dest_size;
@@ -407,7 +413,8 @@ inline size_t CEscapedLength(absl::string_view src) {
return escaped_len;
}
-void CEscapeAndAppendInternal(absl::string_view src, std::string* dest) {
+void CEscapeAndAppendInternal(absl::string_view src,
+ absl::Nonnull<std::string*> dest) {
size_t escaped_len = CEscapedLength(src);
if (escaped_len == src.size()) {
dest->append(src.data(), src.size());
@@ -464,9 +471,10 @@ void CEscapeAndAppendInternal(absl::string_view src, std::string* dest) {
// Reverses the mapping in Base64EscapeInternal; see that method's
// documentation for details of the mapping.
-bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
- size_t szdest, const signed char* unbase64,
- size_t* len) {
+bool Base64UnescapeInternal(absl::Nullable<const char*> src_param, size_t szsrc,
+ absl::Nullable<char*> dest, size_t szdest,
+ absl::Nonnull<const signed char*> unbase64,
+ absl::Nonnull<size_t*> len) {
static const char kPad64Equals = '=';
static const char kPad64Dot = '.';
@@ -802,8 +810,9 @@ constexpr signed char kUnWebSafeBase64[] = {
/* clang-format on */
template <typename String>
-bool Base64UnescapeInternal(const char* src, size_t slen, String* dest,
- const signed char* unbase64) {
+bool Base64UnescapeInternal(absl::Nullable<const char*> src, size_t slen,
+ absl::Nonnull<String*> dest,
+ absl::Nonnull<const signed char*> unbase64) {
// Determine the size of the output string. Base64 encodes every 3 bytes into
// 4 characters. Any leftover chars are added directly for good measure.
const size_t dest_len = 3 * (slen / 4) + (slen % 4);
@@ -847,13 +856,32 @@ constexpr char kHexValueLenient[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
+constexpr signed char kHexValueStrict[256] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // '0'..'9'
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 'A'..'F'
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 'a'..'f'
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+};
/* clang-format on */
// This is a templated function so that T can be either a char*
// or a string. This works because we use the [] operator to access
// individual characters at a time.
template <typename T>
-void HexStringToBytesInternal(const char* from, T to, size_t num) {
+void HexStringToBytesInternal(absl::Nullable<const char*> from, T to,
+ size_t num) {
for (size_t i = 0; i < num; i++) {
to[i] = static_cast<char>(kHexValueLenient[from[i * 2] & 0xFF] << 4) +
(kHexValueLenient[from[i * 2 + 1] & 0xFF]);
@@ -863,7 +891,8 @@ void HexStringToBytesInternal(const char* from, T to, size_t num) {
// This is a templated function so that T can be either a char* or a
// std::string.
template <typename T>
-void BytesToHexStringInternal(const unsigned char* src, T dest, size_t num) {
+void BytesToHexStringInternal(absl::Nullable<const unsigned char*> src, T dest,
+ size_t num) {
auto dest_ptr = &dest[0];
for (auto src_ptr = src; src_ptr != (src + num); ++src_ptr, dest_ptr += 2) {
const char* hex_p = &numbers_internal::kHexTable[*src_ptr * 2];
@@ -878,8 +907,8 @@ void BytesToHexStringInternal(const unsigned char* src, T dest, size_t num) {
//
// See CUnescapeInternal() for implementation details.
// ----------------------------------------------------------------------
-bool CUnescape(absl::string_view source, std::string* dest,
- std::string* error) {
+bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
+ absl::Nullable<std::string*> error) {
return CUnescapeInternal(source, kUnescapeNulls, dest, error);
}
@@ -901,21 +930,23 @@ std::string Utf8SafeCHexEscape(absl::string_view src) {
return CEscapeInternal(src, true, true);
}
-bool Base64Unescape(absl::string_view src, std::string* dest) {
+bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest) {
return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
}
-bool WebSafeBase64Unescape(absl::string_view src, std::string* dest) {
+bool WebSafeBase64Unescape(absl::string_view src,
+ absl::Nonnull<std::string*> dest) {
return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64);
}
-void Base64Escape(absl::string_view src, std::string* dest) {
+void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest) {
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
true, strings_internal::kBase64Chars);
}
-void WebSafeBase64Escape(absl::string_view src, std::string* dest) {
+void WebSafeBase64Escape(absl::string_view src,
+ absl::Nonnull<std::string*> dest) {
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
false, strings_internal::kWebSafeBase64Chars);
@@ -937,6 +968,32 @@ std::string WebSafeBase64Escape(absl::string_view src) {
return dest;
}
+bool HexStringToBytes(absl::string_view hex,
+ absl::Nonnull<std::string*> bytes) {
+ std::string output;
+
+ size_t num_bytes = hex.size() / 2;
+ if (hex.size() != num_bytes * 2) {
+ return false;
+ }
+
+ absl::strings_internal::STLStringResizeUninitialized(&output, num_bytes);
+ auto hex_p = hex.cbegin();
+ for (std::string::iterator bin_p = output.begin(); bin_p != output.end();
+ ++bin_p) {
+ int h1 = absl::kHexValueStrict[static_cast<size_t>(*hex_p++)];
+ int h2 = absl::kHexValueStrict[static_cast<size_t>(*hex_p++)];
+ if (h1 == -1 || h2 == -1) {
+ output.resize(static_cast<size_t>(bin_p - output.begin()));
+ return false;
+ }
+ *bin_p = static_cast<char>((h1 << 4) + h2);
+ }
+
+ *bytes = std::move(output);
+ return true;
+}
+
std::string HexStringToBytes(absl::string_view from) {
std::string result;
const auto num = from.size() / 2;
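
A minimal usage sketch for the new bool-returning HexStringToBytes() added above. The wrapper name DecodeKey is hypothetical; only the absl API introduced in this diff is assumed:

    #include <string>
    #include <utility>
    #include "absl/strings/escaping.h"
    #include "absl/strings/string_view.h"

    // Returns false on odd-length input or any non-hex character. On the
    // failure path `*out` is left untouched, unlike the deprecated
    // string-returning overload, whose result is unspecified for bad input.
    bool DecodeKey(absl::string_view hex, std::string* out) {
      std::string bytes;
      if (!absl::HexStringToBytes(hex, &bytes)) return false;
      *out = std::move(bytes);
      return true;
    }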
diff --git a/contrib/restricted/abseil-cpp/absl/strings/escaping.h b/contrib/restricted/abseil-cpp/absl/strings/escaping.h
index bf2a58980a..3f34fbfc1c 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/escaping.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/escaping.h
@@ -27,7 +27,9 @@
#include <string>
#include <vector>
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
+#include "absl/base/nullability.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
@@ -65,14 +67,16 @@ ABSL_NAMESPACE_BEGIN
//
// std::string s = "foo\\rbar\\nbaz\\t";
// std::string unescaped_s;
-// if (!absl::CUnescape(s, &unescaped_s) {
+// if (!absl::CUnescape(s, &unescaped_s)) {
// ...
// }
// EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t");
-bool CUnescape(absl::string_view source, std::string* dest, std::string* error);
+bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
+ absl::Nullable<std::string*> error);
// Overload of `CUnescape()` with no error reporting.
-inline bool CUnescape(absl::string_view source, std::string* dest) {
+inline bool CUnescape(absl::string_view source,
+ absl::Nonnull<std::string*> dest) {
return CUnescape(source, dest, nullptr);
}
@@ -122,7 +126,7 @@ std::string Utf8SafeCHexEscape(absl::string_view src);
// Encodes a `src` string into a base64-encoded 'dest' string with padding
// characters. This function conforms with RFC 4648 section 4 (base64) and RFC
// 2045.
-void Base64Escape(absl::string_view src, std::string* dest);
+void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest);
std::string Base64Escape(absl::string_view src);
// WebSafeBase64Escape()
@@ -130,7 +134,8 @@ std::string Base64Escape(absl::string_view src);
// Encodes a `src` string into a base64 string, like Base64Escape() does, but
// outputs '-' instead of '+' and '_' instead of '/', and does not pad 'dest'.
// This function conforms with RFC 4648 section 5 (base64url).
-void WebSafeBase64Escape(absl::string_view src, std::string* dest);
+void WebSafeBase64Escape(absl::string_view src,
+ absl::Nonnull<std::string*> dest);
std::string WebSafeBase64Escape(absl::string_view src);
// Base64Unescape()
@@ -140,7 +145,7 @@ std::string WebSafeBase64Escape(absl::string_view src);
// `src` contains invalid characters, `dest` is cleared and returns `false`.
// If padding is included (note that `Base64Escape()` does produce it), it must
// be correct. In the padding, '=' and '.' are treated identically.
-bool Base64Unescape(absl::string_view src, std::string* dest);
+bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest);
// WebSafeBase64Unescape()
//
@@ -149,12 +154,24 @@ bool Base64Unescape(absl::string_view src, std::string* dest);
// invalid characters, `dest` is cleared and returns `false`. If padding is
// included (note that `WebSafeBase64Escape()` does not produce it), it must be
// correct. In the padding, '=' and '.' are treated identically.
-bool WebSafeBase64Unescape(absl::string_view src, std::string* dest);
+bool WebSafeBase64Unescape(absl::string_view src,
+ absl::Nonnull<std::string*> dest);
+
+// HexStringToBytes()
+//
+// Converts the hexadecimal-encoded data in `hex` into raw bytes in the `bytes`
+// output string. If `hex` does not consist of valid hexadecimal data, this
+// function returns false and leaves `bytes` in an unspecified state. Returns
+// true on success.
+ABSL_MUST_USE_RESULT bool HexStringToBytes(absl::string_view hex,
+ absl::Nonnull<std::string*> bytes);
// HexStringToBytes()
//
// Converts an ASCII hex string into bytes, returning binary data of length
-// `from.size()/2`.
+// `from.size()/2`. The input must be valid hexadecimal data; otherwise, the
+// return value is unspecified.
+ABSL_DEPRECATED("Use the HexStringToBytes() that returns a bool")
std::string HexStringToBytes(absl::string_view from);
// BytesToHexString()
diff --git a/contrib/restricted/abseil-cpp/absl/strings/has_absl_stringify.h b/contrib/restricted/abseil-cpp/absl/strings/has_absl_stringify.h
index 274a7865d1..9af0191dc5 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/has_absl_stringify.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/has_absl_stringify.h
@@ -18,6 +18,7 @@
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/strings/string_view.h"
namespace absl {
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.h b/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.h
index 5c0c375dbc..cb29767663 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/charconv_bigint.h
@@ -109,7 +109,17 @@ class BigUnsigned {
size_ = (std::min)(size_ + word_shift, max_words);
count %= 32;
if (count == 0) {
+// GCC 14 emits a number of bogus -Warray-bounds warnings here (see
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=warray-bounds for the
+// upstream report); this is not the only such suppression in Abseil.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(14, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
std::copy_backward(words_, words_ + size_ - word_shift, words_ + size_);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(14, 0)
+#pragma GCC diagnostic pop
+#endif
} else {
for (int i = (std::min)(size_, max_words - 1); i > word_shift; --i) {
words_[i] = (words_[i - word_shift] << count) |
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
index 8744540e2a..f0060f1015 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_internal.h
@@ -85,7 +85,7 @@ enum Constants {
};
// Emits a fatal error "Unexpected node type: xyz" and aborts the program.
-ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep);
+[[noreturn]] void LogFatalNodeType(CordRep* rep);
// Fast implementation of memmove for up to 15 bytes. This implementation is
// safe for overlapping regions. If nullify_tail is true, the destination is
@@ -259,7 +259,7 @@ struct CordRep {
// on the specific layout of these fields. Notably: the non-trivial field
// `refcount` being preceded by `length`, and being tailed by POD data
// members only.
- // # LINT.IfChange
+ // LINT.IfChange
size_t length;
RefcountAndFlags refcount;
// If tag < FLAT, it represents CordRepKind and indicates the type of node.
@@ -275,7 +275,7 @@ struct CordRep {
// allocate room for these in the derived class, as not all compilers reuse
// padding space from the base class (clang and gcc do, MSVC does not, etc)
uint8_t storage[3];
- // # LINT.ThenChange(cord_rep_btree.h:copy_raw)
+ // LINT.ThenChange(cord_rep_btree.h:copy_raw)
// Returns true if this instance's tag matches the requested type.
constexpr bool IsSubstring() const { return tag == SUBSTRING; }
@@ -352,18 +352,19 @@ struct CordRepExternal : public CordRep {
static void Delete(CordRep* rep);
};
-struct Rank1 {};
-struct Rank0 : Rank1 {};
+// Use go/ranked-overloads for dispatching.
+struct Rank0 {};
+struct Rank1 : Rank0 {};
template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
Releaser, absl::string_view>>
-void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) {
+void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view data) {
::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
}
template <typename Releaser,
typename = ::absl::base_internal::invoke_result_t<Releaser>>
-void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) {
+void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view) {
::absl::base_internal::invoke(std::forward<Releaser>(releaser));
}
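
The Rank0/Rank1 pair above implements the ranked-overloads idiom: because Rank1 derives from Rank0, an overload taking Rank1 is the better match whenever both are viable, and SFINAE removes whichever releaser signature does not apply. A minimal standalone sketch of the idiom (names are illustrative, not part of Abseil):

    #include <utility>

    struct Rank0 {};
    struct Rank1 : Rank0 {};  // more derived == higher priority

    // Preferred whenever f(0) is well-formed.
    template <typename F, typename = decltype(std::declval<F>()(0))>
    void Call(Rank1, F&& f) { std::forward<F>(f)(0); }

    // Fallback when only f() is well-formed; Rank1{} converts to Rank0.
    template <typename F, typename = decltype(std::declval<F>()())>
    void Call(Rank0, F&& f) { std::forward<F>(f)(); }

    template <typename F>
    void Call(F&& f) { Call(Rank1{}, std::forward<F>(f)); }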
@@ -381,7 +382,7 @@ struct CordRepExternalImpl
}
~CordRepExternalImpl() {
- InvokeReleaser(Rank0{}, std::move(this->template get<0>()),
+ InvokeReleaser(Rank1{}, std::move(this->template get<0>()),
absl::string_view(base, length));
}
@@ -398,7 +399,6 @@ inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos,
assert(pos < child->length);
assert(n <= child->length - pos);
- // TODO(b/217376272): Harden internal logic.
  // Move to strategic places inside the Cord logic and make this an assert.
if (ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) {
LogFatalNodeType(child);
@@ -520,6 +520,7 @@ class InlineData {
constexpr InlineData(const InlineData& rhs) noexcept;
InlineData& operator=(const InlineData& rhs) noexcept;
+ friend void swap(InlineData& lhs, InlineData& rhs) noexcept;
friend bool operator==(const InlineData& lhs, const InlineData& rhs) {
#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER
@@ -770,6 +771,12 @@ class InlineData {
char data[kMaxInline + 1];
AsTree as_tree;
};
+
+ // TODO(b/145829486): see swap(InlineData, InlineData) for more info.
+ inline void SwapValue(Rep rhs, Rep& refrhs) {
+ memcpy(&refrhs, this, sizeof(*this));
+ memcpy(this, &rhs, sizeof(*this));
+ }
};
// Private implementation of `Compare()`
@@ -884,6 +891,19 @@ inline void CordRep::Unref(CordRep* rep) {
}
}
+inline void swap(InlineData& lhs, InlineData& rhs) noexcept {
+ lhs.unpoison();
+ rhs.unpoison();
+ // TODO(b/145829486): `std::swap(lhs.rep_, rhs.rep_)` results in bad codegen
+ // on clang, spilling the temporary swap value on the stack. Since `Rep` is
+ // trivial, we can make clang DTRT by calling a hand-rolled `SwapValue` where
+ // we pass `rhs` both by value (register allocated) and by reference. The IR
+ // then folds and inlines correctly into an optimized swap without spill.
+ lhs.rep_.SwapValue(rhs.rep_, rhs.rep_);
+ rhs.poison();
+ lhs.poison();
+}
+
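
The SwapValue/swap pair above relies on Rep being trivially copyable. A hedged sketch of the same no-spill pattern outside Abseil, with hypothetical type and function names:

    #include <cstring>

    struct Blob { unsigned char bytes[16]; };  // trivially copyable

    // `b_value` arrives in registers, so both memcpy calls fold into
    // register moves, avoiding the stack-spilled temporary that
    // `T tmp = a; a = b; b = tmp;` can produce under clang.
    inline void SwapValue(Blob& a, Blob b_value, Blob& b_ref) {
      std::memcpy(&b_ref, &a, sizeof(Blob));
      std::memcpy(&a, &b_value, sizeof(Blob));
    }

    inline void Swap(Blob& a, Blob& b) { SwapValue(a, b, b); }  // b passed twice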
} // namespace cord_internal
ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.h
index be94b62ec8..ab259afe64 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cord_rep_btree.h
@@ -684,14 +684,14 @@ inline CordRepBtree* CordRepBtree::CopyRaw(size_t new_length) const {
// except `refcount` is trivially copyable, and the compiler does not
// efficiently coalesce member-wise copy of these members.
// See https://gcc.godbolt.org/z/qY8zsca6z
- // # LINT.IfChange(copy_raw)
+ // LINT.IfChange(copy_raw)
tree->length = new_length;
uint8_t* dst = &tree->tag;
const uint8_t* src = &tag;
const ptrdiff_t offset = src - reinterpret_cast<const uint8_t*>(this);
memcpy(dst, src, sizeof(CordRepBtree) - static_cast<size_t>(offset));
return tree;
- // # LINT.ThenChange()
+ // LINT.ThenChange()
}
inline CordRepBtree* CordRepBtree::Copy() const {
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.cc
index 20d314f03c..6033d04696 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.cc
@@ -40,13 +40,15 @@ std::atomic<int> g_cordz_mean_interval(50000);
// Special negative 'not initialized' per thread value for cordz_next_sample.
static constexpr int64_t kInitCordzNextSample = -1;
-ABSL_CONST_INIT thread_local int64_t cordz_next_sample = kInitCordzNextSample;
+ABSL_CONST_INIT thread_local SamplingState cordz_next_sample = {
+ kInitCordzNextSample, 1};
// kIntervalIfDisabled is the number of profile-eligible events that must occur
// before the code will confirm that cordz is still disabled.
constexpr int64_t kIntervalIfDisabled = 1 << 16;
-ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() {
+ABSL_ATTRIBUTE_NOINLINE int64_t
+cordz_should_profile_slow(SamplingState& state) {
thread_local absl::profiling_internal::ExponentialBiased
exponential_biased_generator;
@@ -55,30 +57,34 @@ ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() {
// Check if we disabled profiling. If so, set the next sample to a "large"
// number to minimize the overhead of the should_profile codepath.
if (mean_interval <= 0) {
- cordz_next_sample = kIntervalIfDisabled;
- return false;
+ state = {kIntervalIfDisabled, kIntervalIfDisabled};
+ return 0;
}
// Check if we're always sampling.
if (mean_interval == 1) {
- cordz_next_sample = 1;
- return true;
+ state = {1, 1};
+ return 1;
}
- if (cordz_next_sample <= 0) {
+ if (cordz_next_sample.next_sample <= 0) {
// If first check on current thread, check cordz_should_profile()
// again using the created (initial) stride in cordz_next_sample.
- const bool initialized = cordz_next_sample != kInitCordzNextSample;
- cordz_next_sample = exponential_biased_generator.GetStride(mean_interval);
- return initialized || cordz_should_profile();
+ const bool initialized =
+ cordz_next_sample.next_sample != kInitCordzNextSample;
+ auto old_stride = state.sample_stride;
+ auto stride = exponential_biased_generator.GetStride(mean_interval);
+ state = {stride, stride};
+ bool should_sample = initialized || cordz_should_profile() > 0;
+ return should_sample ? old_stride : 0;
}
- --cordz_next_sample;
- return false;
+ --state.next_sample;
+ return 0;
}
void cordz_set_next_sample_for_testing(int64_t next_sample) {
- cordz_next_sample = next_sample;
+ cordz_next_sample = {next_sample, next_sample};
}
#endif // ABSL_INTERNAL_CORDZ_ENABLED
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.h
index ed108bf1b4..84c185e407 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_functions.h
@@ -41,23 +41,33 @@ void set_cordz_mean_interval(int32_t mean_interval);
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
+struct SamplingState {
+ int64_t next_sample;
+ int64_t sample_stride;
+};
+
// cordz_next_sample is the number of events until the next sample event. If
// the value is 1 or less, the code will check on the next event if cordz is
// enabled, and if so, will sample the Cord. cordz is only enabled when we can
// use thread locals.
-ABSL_CONST_INIT extern thread_local int64_t cordz_next_sample;
-
-// Determines if the next sample should be profiled. If it is, the value pointed
-// at by next_sample will be set with the interval until the next sample.
-bool cordz_should_profile_slow();
-
-// Returns true if the next cord should be sampled.
-inline bool cordz_should_profile() {
- if (ABSL_PREDICT_TRUE(cordz_next_sample > 1)) {
- cordz_next_sample--;
- return false;
+ABSL_CONST_INIT extern thread_local SamplingState cordz_next_sample;
+
+// Determines if the next sample should be profiled.
+// Returns:
+// 0: Do not sample
+// >0: Sample with the stride of the last sampling period
+int64_t cordz_should_profile_slow(SamplingState& state);
+
+// Determines if the next sample should be profiled.
+// Returns:
+// 0: Do not sample
+// >0: Sample with the stride of the last sampling period
+inline int64_t cordz_should_profile() {
+ if (ABSL_PREDICT_TRUE(cordz_next_sample.next_sample > 1)) {
+ cordz_next_sample.next_sample--;
+ return 0;
}
- return cordz_should_profile_slow();
+ return cordz_should_profile_slow(cordz_next_sample);
}
// Sets the interval until the next sample (for testing only)
@@ -65,7 +75,7 @@ void cordz_set_next_sample_for_testing(int64_t next_sample);
#else // ABSL_INTERNAL_CORDZ_ENABLED
-inline bool cordz_should_profile() { return false; }
+inline int64_t cordz_should_profile() { return 0; }
inline void cordz_set_next_sample_for_testing(int64_t) {}
#endif // ABSL_INTERNAL_CORDZ_ENABLED
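
A hedged sketch of the caller pattern the new return contract enables; RecordSample is a hypothetical sink, and the real consumer is CordzInfo::MaybeTrackCord later in this diff:

    void RecordSample(int64_t stride);  // hypothetical sink

    // A nonzero return means "sample this event" and carries the stride
    // (events per sample), which the consumer stores so aggregated
    // statistics can be weighted back up to the full population.
    void OnCordEvent() {
      int64_t stride = absl::cord_internal::cordz_should_profile();
      if (ABSL_PREDICT_FALSE(stride > 0)) {
        RecordSample(stride);
      }
    }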
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle.cc
index a7061dbe22..53d5f529a7 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_handle.cc
@@ -16,6 +16,7 @@
#include <atomic>
#include "absl/base/internal/raw_logging.h" // For ABSL_RAW_CHECK
+#include "absl/base/no_destructor.h"
#include "absl/synchronization/mutex.h"
namespace absl {
@@ -43,33 +44,32 @@ struct Queue {
}
};
-static Queue* GlobalQueue() {
- static Queue* global_queue = new Queue;
- return global_queue;
+static Queue& GlobalQueue() {
+ static absl::NoDestructor<Queue> global_queue;
+ return *global_queue;
}
} // namespace
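
The switch from a leaked `new Queue` to absl::NoDestructor keeps the same never-destroyed semantics but drops the heap allocation and pointer indirection. A minimal sketch of the pattern with a hypothetical payload type:

    #include "absl/base/no_destructor.h"

    struct Registry { int entries = 0; };  // hypothetical payload

    // Constructed on first call, never destroyed: safe to use from other
    // static destructors during shutdown, with no allocation.
    Registry& GlobalRegistry() {
      static absl::NoDestructor<Registry> registry;
      return *registry;
    }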
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
- Queue* global_queue = GlobalQueue();
+ Queue& global_queue = GlobalQueue();
if (is_snapshot) {
- MutexLock lock(&global_queue->mutex);
- CordzHandle* dq_tail =
- global_queue->dq_tail.load(std::memory_order_acquire);
+ MutexLock lock(&global_queue.mutex);
+ CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
dq_prev_ = dq_tail;
dq_tail->dq_next_ = this;
}
- global_queue->dq_tail.store(this, std::memory_order_release);
+ global_queue.dq_tail.store(this, std::memory_order_release);
}
}
CordzHandle::~CordzHandle() {
- Queue* global_queue = GlobalQueue();
+ Queue& global_queue = GlobalQueue();
if (is_snapshot_) {
std::vector<CordzHandle*> to_delete;
{
- MutexLock lock(&global_queue->mutex);
+ MutexLock lock(&global_queue.mutex);
CordzHandle* next = dq_next_;
if (dq_prev_ == nullptr) {
// We were head of the queue, delete every CordzHandle until we reach
@@ -85,7 +85,7 @@ CordzHandle::~CordzHandle() {
if (next) {
next->dq_prev_ = dq_prev_;
} else {
- global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
+ global_queue.dq_tail.store(dq_prev_, std::memory_order_release);
}
}
for (CordzHandle* handle : to_delete) {
@@ -95,20 +95,20 @@ CordzHandle::~CordzHandle() {
}
bool CordzHandle::SafeToDelete() const {
- return is_snapshot_ || GlobalQueue()->IsEmpty();
+ return is_snapshot_ || GlobalQueue().IsEmpty();
}
void CordzHandle::Delete(CordzHandle* handle) {
assert(handle);
if (handle) {
- Queue* const queue = GlobalQueue();
+ Queue& queue = GlobalQueue();
if (!handle->SafeToDelete()) {
- MutexLock lock(&queue->mutex);
- CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
+ MutexLock lock(&queue.mutex);
+ CordzHandle* dq_tail = queue.dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
handle->dq_prev_ = dq_tail;
dq_tail->dq_next_ = handle;
- queue->dq_tail.store(handle, std::memory_order_release);
+ queue.dq_tail.store(handle, std::memory_order_release);
return;
}
}
@@ -118,9 +118,9 @@ void CordzHandle::Delete(CordzHandle* handle) {
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
std::vector<const CordzHandle*> handles;
- Queue* global_queue = GlobalQueue();
- MutexLock lock(&global_queue->mutex);
- CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
+ Queue& global_queue = GlobalQueue();
+ MutexLock lock(&global_queue.mutex);
+ CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire);
for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
handles.push_back(p);
}
@@ -133,9 +133,9 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
if (handle == nullptr) return true;
if (handle->is_snapshot_) return false;
bool snapshot_found = false;
- Queue* global_queue = GlobalQueue();
- MutexLock lock(&global_queue->mutex);
- for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
+ Queue& global_queue = GlobalQueue();
+ MutexLock lock(&global_queue.mutex);
+ for (const CordzHandle* p = global_queue.dq_tail; p; p = p->dq_prev_) {
if (p == handle) return !snapshot_found;
if (p == this) snapshot_found = true;
}
@@ -150,8 +150,8 @@ CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
return handles;
}
- Queue* global_queue = GlobalQueue();
- MutexLock lock(&global_queue->mutex);
+ Queue& global_queue = GlobalQueue();
+ MutexLock lock(&global_queue.mutex);
for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
if (!p->is_snapshot()) {
handles.push_back(p);
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc
index b24c3da712..b7c7fed9fa 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.cc
@@ -14,6 +14,8 @@
#include "absl/strings/internal/cordz_info.h"
+#include <cstdint>
+
#include "absl/base/config.h"
#include "absl/base/internal/spinlock.h"
#include "absl/container/inlined_vector.h"
@@ -247,10 +249,12 @@ CordzInfo* CordzInfo::Next(const CordzSnapshot& snapshot) const {
return next;
}
-void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method) {
+void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method,
+ int64_t sampling_stride) {
assert(cord.is_tree());
assert(!cord.is_profiled());
- CordzInfo* cordz_info = new CordzInfo(cord.as_tree(), nullptr, method);
+ CordzInfo* cordz_info =
+ new CordzInfo(cord.as_tree(), nullptr, method, sampling_stride);
cord.set_cordz_info(cordz_info);
cordz_info->Track();
}
@@ -266,7 +270,8 @@ void CordzInfo::TrackCord(InlineData& cord, const InlineData& src,
if (cordz_info != nullptr) cordz_info->Untrack();
// Start new cord sample
- cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method);
+ cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method,
+ src.cordz_info()->sampling_stride());
cord.set_cordz_info(cordz_info);
cordz_info->Track();
}
@@ -298,9 +303,8 @@ size_t CordzInfo::FillParentStack(const CordzInfo* src, void** stack) {
return src->stack_depth_;
}
-CordzInfo::CordzInfo(CordRep* rep,
- const CordzInfo* src,
- MethodIdentifier method)
+CordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src,
+ MethodIdentifier method, int64_t sampling_stride)
: rep_(rep),
stack_depth_(
static_cast<size_t>(absl::GetStackTrace(stack_,
@@ -309,7 +313,8 @@ CordzInfo::CordzInfo(CordRep* rep,
parent_stack_depth_(FillParentStack(src, parent_stack_)),
method_(method),
parent_method_(GetParentMethod(src)),
- create_time_(absl::Now()) {
+ create_time_(absl::Now()),
+ sampling_stride_(sampling_stride) {
update_tracker_.LossyAdd(method);
if (src) {
// Copy parent counters.
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.h b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.h
index 17eaa91c77..2dc9d16def 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/cordz_info.h
@@ -60,7 +60,8 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle {
// and/or deleted. `method` identifies the Cord public API method initiating
// the cord to be sampled.
// Requires `cord` to hold a tree, and `cord.cordz_info()` to be null.
- static void TrackCord(InlineData& cord, MethodIdentifier method);
+ static void TrackCord(InlineData& cord, MethodIdentifier method,
+ int64_t sampling_stride);
// Identical to TrackCord(), except that this function fills the
// `parent_stack` and `parent_method` properties of the returned CordzInfo
@@ -181,6 +182,8 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle {
// or RemovePrefix.
CordzStatistics GetCordzStatistics() const;
+ int64_t sampling_stride() const { return sampling_stride_; }
+
private:
using SpinLock = absl::base_internal::SpinLock;
using SpinLockHolder = ::absl::base_internal::SpinLockHolder;
@@ -199,7 +202,7 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle {
static constexpr size_t kMaxStackDepth = 64;
explicit CordzInfo(CordRep* rep, const CordzInfo* src,
- MethodIdentifier method);
+ MethodIdentifier method, int64_t weight);
~CordzInfo() override;
// Sets `rep_` without holding a lock.
@@ -250,12 +253,14 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle {
const MethodIdentifier parent_method_;
CordzUpdateTracker update_tracker_;
const absl::Time create_time_;
+ const int64_t sampling_stride_;
};
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
InlineData& cord, MethodIdentifier method) {
- if (ABSL_PREDICT_FALSE(cordz_should_profile())) {
- TrackCord(cord, method);
+ auto stride = cordz_should_profile();
+ if (ABSL_PREDICT_FALSE(stride > 0)) {
+ TrackCord(cord, method, stride);
}
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/escaping.cc b/contrib/restricted/abseil-cpp/absl/strings/internal/escaping.cc
index 56a4cbed03..d2abe66984 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/escaping.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/escaping.cc
@@ -14,6 +14,8 @@
#include "absl/strings/internal/escaping.h"
+#include <limits>
+
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
@@ -31,12 +33,14 @@ ABSL_CONST_INIT const char kBase64Chars[] =
ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
// Base64 encodes three bytes of input at a time. If the input is not
// divisible by three, we pad as appropriate.
//
// Base64 encodes each three bytes of input into four bytes of output.
+ constexpr size_t kMaxSize = (std::numeric_limits<size_t>::max() - 1) / 4 * 3;
+ ABSL_INTERNAL_CHECK(input_len <= kMaxSize,
+ "CalculateBase64EscapedLenInternal() overflow");
size_t len = (input_len / 3) * 4;
// Since all base 64 input is an integral number of octets, only the following
@@ -66,7 +70,6 @@ size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
}
}
- assert(len >= input_len); // make sure we didn't overflow
return len;
}
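
The new ABSL_INTERNAL_CHECK replaces the post-hoc `assert(len >= input_len)` with an up-front bound: the padded output is roughly ceil(input_len / 3) * 4 characters, so any input_len above (SIZE_MAX - 1) / 4 * 3 could overflow size_t. A small worked check of the padded-length arithmetic (helper name hypothetical):

    #include <cstddef>

    // ceil(n / 3) * 4 output characters for n input bytes, with padding.
    constexpr size_t PaddedBase64Len(size_t n) { return ((n + 2) / 3) * 4; }

    static_assert(PaddedBase64Len(1) == 4, "1 byte  -> xx==");
    static_assert(PaddedBase64Len(2) == 4, "2 bytes -> xxx=");
    static_assert(PaddedBase64Len(3) == 4, "3 bytes -> xxxx");
    static_assert(PaddedBase64Len(4) == 8, "4 bytes -> two quads");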
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h
index d97d5033d8..3e730c7aab 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_join_internal.h
@@ -31,16 +31,23 @@
#ifndef ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
#define ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
+#include <cstdint>
#include <cstring>
+#include <initializer_list>
#include <iterator>
+#include <limits>
#include <memory>
#include <string>
+#include <tuple>
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/strings/internal/ostringstream.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -230,14 +237,19 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
if (start != end) {
// Sums size
auto&& start_value = *start;
- size_t result_size = start_value.size();
+    // Use uint64_t to prevent size_t overflow. We assume that the sizes of
+    // in-memory strings cannot overflow a uint64_t.
+ uint64_t result_size = start_value.size();
for (Iterator it = start; ++it != end;) {
result_size += s.size();
result_size += (*it).size();
}
if (result_size > 0) {
- STLStringResizeUninitialized(&result, result_size);
+ constexpr uint64_t kMaxSize =
+ uint64_t{(std::numeric_limits<size_t>::max)()};
+ ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
+ STLStringResizeUninitialized(&result, static_cast<size_t>(result_size));
// Joins strings
char* result_buf = &*result.begin();
@@ -310,6 +322,15 @@ std::string JoinRange(const Range& range, absl::string_view separator) {
return JoinRange(begin(range), end(range), separator);
}
+template <typename Tuple, std::size_t... I>
+std::string JoinTuple(const Tuple& value, absl::string_view separator,
+ std::index_sequence<I...>) {
+ return JoinRange(
+ std::initializer_list<absl::string_view>{
+ static_cast<const AlphaNum&>(std::get<I>(value)).Piece()...},
+ separator);
+}
+
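
A hedged usage sketch for the new JoinTuple helper. It is an internal API (public callers reach it through absl::StrJoin on a tuple), and the sketch assumes this file's includes:

    #include <string>
    #include <tuple>
    #include <utility>

    std::tuple<const char*, int, double> t("id", 42, 2.5);
    std::string s = absl::strings_internal::JoinTuple(
        t, "-", std::make_index_sequence<3>{});
    // s == "id-42-2.5": each std::get<I>(t) is routed through AlphaNum
    // into a string_view, which JoinRange then joins with the separator.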
} // namespace strings_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h b/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h
index 081ad85ad4..11ea96f2c1 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/internal/str_split_internal.h
@@ -30,6 +30,7 @@
#define ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_
#include <array>
+#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <tuple>
@@ -402,7 +403,10 @@ class Splitter {
ar[index].size = it->size();
++it;
} while (++index != ar.size() && !it.at_end());
- v.insert(v.end(), ar.begin(), ar.begin() + index);
+ // We static_cast index to a signed type to work around overzealous
+ // compiler warnings about signedness.
+ v.insert(v.end(), ar.begin(),
+ ar.begin() + static_cast<ptrdiff_t>(index));
}
return v;
}
diff --git a/contrib/restricted/abseil-cpp/absl/strings/numbers.cc b/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
index 882c3a8b85..b57d9e82e5 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/numbers.cc
@@ -20,9 +20,7 @@
#include <algorithm>
#include <cassert>
#include <cfloat> // for DBL_DIG and FLT_DIG
-#include <climits>
#include <cmath> // for HUGE_VAL
-#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
@@ -30,7 +28,6 @@
#include <iterator>
#include <limits>
#include <system_error> // NOLINT(build/c++11)
-#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
@@ -159,71 +156,28 @@ constexpr uint32_t kTwoZeroBytes = 0x0101 * '0';
constexpr uint64_t kFourZeroBytes = 0x01010101 * '0';
constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0';
-template <typename T>
-constexpr T Pow(T base, uint32_t n) {
- // Exponentiation by squaring
- return static_cast<T>((n > 1 ? Pow(base * base, n >> 1) : static_cast<T>(1)) *
- ((n & 1) ? base : static_cast<T>(1)));
-}
-
-// Given n, calculates C where the following holds for all 0 <= x < Pow(100, n):
-// x / Pow(10, n) == x * C / Pow(2, n * 10)
-// In other words, it allows us to divide by a power of 10 via a single
-// multiplication and bit shifts, assuming the input will be smaller than the
-// square of that power of 10.
-template <typename T>
-constexpr T ComputePowerOf100DivisionCoefficient(uint32_t n) {
- if (n > 4) {
- // This doesn't work for large powers of 100, due to overflow
- abort();
- }
- T denom = 16 - 1;
- T num = (denom + 1) - 10;
- T gcd = 3; // Greatest common divisor of numerator and denominator
- denom = Pow(denom / gcd, n);
- num = Pow(num / gcd, 9 * n);
- T quotient = num / denom;
- if (num % denom >= denom / 2) {
- // Round up, since the remainder is more than half the denominator
- ++quotient;
- }
- return quotient;
-}
-
-// * kDivisionBy10Mul / kDivisionBy10Div is a division by 10 for values from 0
-// to 99. It's also a division of a structure [k takes 2 bytes][m takes 2
-// bytes], then * kDivisionBy10Mul / kDivisionBy10Div will be [k / 10][m / 10].
-// It allows parallel division.
-constexpr uint64_t kDivisionBy10Mul =
- ComputePowerOf100DivisionCoefficient<uint64_t>(1);
-static_assert(kDivisionBy10Mul == 103,
- "division coefficient for 10 is incorrect");
+// Multiplying by 103 and dividing by 1024 (i.e. `* 103 >> 10`) divides by 10
+// for values from 0 to 99. Applied to a packed structure
+// [k in 2 bytes][m in 2 bytes], `* 103 / 1024` yields [k / 10][m / 10],
+// dividing both fields in parallel.
+constexpr uint64_t kDivisionBy10Mul = 103u;
constexpr uint64_t kDivisionBy10Div = 1 << 10;
-// * kDivisionBy100Mul / kDivisionBy100Div is a division by 100 for values from
-// 0 to 9999.
-constexpr uint64_t kDivisionBy100Mul =
- ComputePowerOf100DivisionCoefficient<uint64_t>(2);
-static_assert(kDivisionBy100Mul == 10486,
- "division coefficient for 100 is incorrect");
+// Likewise, `* 10486 / 1048576` divides values from 0 to 9999 by 100.
+constexpr uint64_t kDivisionBy100Mul = 10486u;
constexpr uint64_t kDivisionBy100Div = 1 << 20;
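
These hard-coded constants replace the constexpr derivation deleted below; the identities they must satisfy are easy to spot-check exhaustively. A hedged sketch, not part of the library:

    #include <cstdint>

    constexpr bool DivisionConstantsHold() {
      for (uint32_t x = 0; x < 100; ++x)
        if ((x * 103u) >> 10 != x / 10) return false;
      for (uint32_t x = 0; x < 10000; ++x)
        if ((x * 10486u) >> 20 != x / 100) return false;
      return true;
    }
    static_assert(DivisionConstantsHold(),
                  "kDivisionBy10Mul / kDivisionBy100Mul are exact over their ranges");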
-static_assert(ComputePowerOf100DivisionCoefficient<uint64_t>(3) == 1073742,
- "division coefficient for 1000 is incorrect");
-
-// Same as `PrepareEightDigits`, but produces 2 digits for integers < 100.
-inline uint32_t PrepareTwoDigitsImpl(uint32_t i, bool reversed) {
- assert(i < 100);
- uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
- uint32_t mod10 = i - 10u * div10;
- return (div10 << (reversed ? 8 : 0)) + (mod10 << (reversed ? 0 : 8));
-}
-inline uint32_t PrepareTwoDigits(uint32_t i) {
- return PrepareTwoDigitsImpl(i, false);
+// Encode functions write the ASCII output of input `n` to `out_str`.
+inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) {
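+  // Branch-free digit count: for n < 10, (n - 10) is negative, so the
+  // arithmetic shift yields num_digits == -1; for 10 <= n < 100 it yields 0.
+  // `num_digits & 8` is then 8 or 0 and shifts away the leading '0' byte for
+  // one-digit inputs; the returned pointer advances by 2 + num_digits, i.e.
+  // by 1 or 2 (Store16 still writes two bytes, the extra one is scratch).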
+ int num_digits = static_cast<int>(n - 10) >> 8;
+ uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
+ uint32_t mod10 = n - 10u * div10;
+ uint32_t base = kTwoZeroBytes + div10 + (mod10 << 8);
+ base >>= num_digits & 8;
+ little_endian::Store16(out_str, static_cast<uint16_t>(base));
+ return out_str + 2 + num_digits;
}
-// Same as `PrepareEightDigits`, but produces 4 digits for integers < 10000.
-inline uint32_t PrepareFourDigitsImpl(uint32_t n, bool reversed) {
+inline char* EncodeTenThousand(uint32_t n, absl::Nonnull<char*> out_str) {
  // We split the lower 2 digits and upper 2 digits of n into two consecutive
  // 2-byte blocks: 123 -> [\0\1][\0\23]. We divide both blocks by 10
  // (one division plus zeroing of upper bits), and compute modulo 10 as well "in
@@ -231,19 +185,22 @@ inline uint32_t PrepareFourDigitsImpl(uint32_t n, bool reversed) {
// strip trailing zeros, add ASCII '0000' and return.
uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div;
uint32_t mod100 = n - 100ull * div100;
- uint32_t hundreds =
- (mod100 << (reversed ? 0 : 16)) + (div100 << (reversed ? 16 : 0));
+ uint32_t hundreds = (mod100 << 16) + div100;
uint32_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
tens &= (0xFull << 16) | 0xFull;
- tens = (tens << (reversed ? 8 : 0)) +
- static_cast<uint32_t>((hundreds - 10ull * tens) << (reversed ? 0 : 8));
- return tens;
-}
-inline uint32_t PrepareFourDigits(uint32_t n) {
- return PrepareFourDigitsImpl(n, false);
-}
-inline uint32_t PrepareFourDigitsReversed(uint32_t n) {
- return PrepareFourDigitsImpl(n, true);
+ tens += (hundreds - 10ull * tens) << 8;
+ ABSL_ASSUME(tens != 0);
+  // The result can contain trailing zero bytes; we need to strip them up to
+  // the first significant byte of the final representation. For example, for
+  // n = 123, `tens` has the byte representation \0\1\2\3. We use countr_zero
+  // and mask the count with `& -8` to round it down to a multiple of 8, so
+  // that whole zero bytes are stripped rather than individual zero bits.
+  // (`0 - 8u` instead of `-8u` avoids an MSVC warning about negating an
+  // unsigned value.)
+ uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(tens)) & (0 - 8u);
+ tens += kFourZeroBytes;
+ tens >>= zeroes;
+ little_endian::Store32(out_str, tens);
+ return out_str + sizeof(tens) - zeroes / 8;
}
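
A hedged behavior check for EncodeTenThousand, assuming access to this file-local helper (the demo function is not part of the library):

    #include <cassert>
    #include <cstring>

    void EncodeTenThousandDemo() {
      char buf[8];
      // 123 packs to bytes \0\1\2\3; the zero-byte strip drops the leading
      // \0, ASCII '0' is added, and "123" plus one scratch byte is stored.
      char* end = EncodeTenThousand(123, buf);
      assert(end - buf == 3);
      assert(std::memcmp(buf, "123", 3) == 0);
    }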
// Helper function to produce an ASCII representation of `i`.
@@ -259,309 +216,126 @@ inline uint32_t PrepareFourDigitsReversed(uint32_t n) {
// // Note two leading zeros:
// EXPECT_EQ(absl::string_view(ascii, 8), "00102030");
//
-// If `Reversed` is set to true, the result becomes reversed to "03020100".
-//
// Pre-condition: `i` must be less than 100000000.
-inline uint64_t PrepareEightDigitsImpl(uint32_t i, bool reversed) {
+inline uint64_t PrepareEightDigits(uint32_t i) {
ABSL_ASSUME(i < 10000'0000);
// Prepare 2 blocks of 4 digits "in parallel".
uint32_t hi = i / 10000;
uint32_t lo = i % 10000;
- uint64_t merged = (uint64_t{hi} << (reversed ? 32 : 0)) |
- (uint64_t{lo} << (reversed ? 0 : 32));
+ uint64_t merged = hi | (uint64_t{lo} << 32);
uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) &
((0x7Full << 32) | 0x7Full);
uint64_t mod100 = merged - 100ull * div100;
- uint64_t hundreds =
- (mod100 << (reversed ? 0 : 16)) + (div100 << (reversed ? 16 : 0));
+ uint64_t hundreds = (mod100 << 16) + div100;
uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull;
- tens = (tens << (reversed ? 8 : 0)) +
- ((hundreds - 10ull * tens) << (reversed ? 0 : 8));
+ tens += (hundreds - 10ull * tens) << 8;
return tens;
}
-inline uint64_t PrepareEightDigits(uint32_t i) {
- return PrepareEightDigitsImpl(i, false);
-}
-inline uint64_t PrepareEightDigitsReversed(uint32_t i) {
- return PrepareEightDigitsImpl(i, true);
-}
-template <typename T, typename BackwardIt>
-class FastUIntToStringConverter {
- static_assert(
- std::is_same<T, decltype(+std::declval<T>())>::value,
- "to avoid code bloat, only instantiate this for int and larger types");
- static_assert(std::is_unsigned<T>::value,
- "this class is only for unsigned types");
-
- public:
- // Outputs the given number backward (like with std::copy_backward),
- // starting from the end of the string.
- // The number of digits in the number must have been already measured and
- // passed *exactly*, otherwise the behavior is undefined.
- // (This is an optimization, as calculating the number of digits again would
- // slow down the hot path.)
- // Returns an iterator to the start of the suffix that was appended.
- static BackwardIt FastIntToBufferBackward(T v, BackwardIt end) {
- // THIS IS A HOT FUNCTION with a very deliberate structure to exploit branch
- // prediction and shorten the critical path for smaller numbers.
- // Do not move around the if/else blocks or attempt to simplify it
- // without benchmarking any changes.
-
- if (v < 10) {
- goto AT_LEAST_1 /* NOTE: mandatory for the 0 case */;
- }
- if (v < 1000) {
- goto AT_LEAST_10;
- }
- if (v < 10000000) {
- goto AT_LEAST_1000;
- }
-
- if (v >= 100000000 / 10) {
- if (v >= 10000000000000000 / 10) {
- DoFastIntToBufferBackward<8>(v, end);
- }
- DoFastIntToBufferBackward<8>(v, end);
- }
-
- if (v >= 10000 / 10) {
- AT_LEAST_1000:
- DoFastIntToBufferBackward<4>(v, end);
- }
-
- if (v >= 100 / 10) {
- AT_LEAST_10:
- DoFastIntToBufferBackward<2>(v, end);
- }
-
- if (v >= 10 / 10) {
- AT_LEAST_1:
- end = DoFastIntToBufferBackward(v, end, std::integral_constant<int, 1>());
- }
- return end;
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE absl::Nonnull<char*> EncodeFullU32(
+ uint32_t n, absl::Nonnull<char*> out_str) {
+ if (n < 10) {
+ *out_str = static_cast<char>('0' + n);
+ return out_str + 1;
}
-
- private:
- // Only assume pointers are contiguous for now. String and vector iterators
- // could be special-cased as well, but there's no need for them here.
- // With C++20 we can probably switch to std::contiguous_iterator_tag.
- static constexpr bool kIsContiguousIterator =
- std::is_pointer<BackwardIt>::value;
-
- template <int Exponent>
- static void DoFastIntToBufferBackward(T& v, BackwardIt& end) {
- constexpr T kModulus = Pow<T>(10, Exponent);
- T remainder = static_cast<T>(v % kModulus);
- v = static_cast<T>(v / kModulus);
- end = DoFastIntToBufferBackward(remainder, end,
- std::integral_constant<int, Exponent>());
- }
-
- static BackwardIt DoFastIntToBufferBackward(const T&, BackwardIt end,
- std::integral_constant<int, 0>) {
- return end;
- }
-
- static BackwardIt DoFastIntToBufferBackward(T v, BackwardIt end,
- std::integral_constant<int, 1>) {
- *--end = static_cast<char>('0' + v);
- return DoFastIntToBufferBackward(v, end, std::integral_constant<int, 0>());
- }
-
- static BackwardIt DoFastIntToBufferBackward(T v, BackwardIt end,
- std::integral_constant<int, 4>) {
- if (kIsContiguousIterator) {
- const uint32_t digits =
- PrepareFourDigits(static_cast<uint32_t>(v)) + kFourZeroBytes;
- end -= sizeof(digits);
- little_endian::Store32(&*end, digits);
- } else {
- uint32_t digits =
- PrepareFourDigitsReversed(static_cast<uint32_t>(v)) + kFourZeroBytes;
- for (size_t i = 0; i < sizeof(digits); ++i) {
- *--end = static_cast<char>(digits);
- digits >>= CHAR_BIT;
- }
- }
- return end;
+ if (n < 100'000'000) {
+ uint64_t bottom = PrepareEightDigits(n);
+ ABSL_ASSUME(bottom != 0);
+      // `0 - 8u` instead of `-8u` avoids an MSVC warning about negating an
+      // unsigned value.
+ uint32_t zeroes =
+ static_cast<uint32_t>(absl::countr_zero(bottom)) & (0 - 8u);
+ little_endian::Store64(out_str, (bottom + kEightZeroBytes) >> zeroes);
+ return out_str + sizeof(bottom) - zeroes / 8;
}
-
- static BackwardIt DoFastIntToBufferBackward(T v, BackwardIt end,
- std::integral_constant<int, 8>) {
- if (kIsContiguousIterator) {
- const uint64_t digits =
- PrepareEightDigits(static_cast<uint32_t>(v)) + kEightZeroBytes;
- end -= sizeof(digits);
- little_endian::Store64(&*end, digits);
- } else {
- uint64_t digits = PrepareEightDigitsReversed(static_cast<uint32_t>(v)) +
- kEightZeroBytes;
- for (size_t i = 0; i < sizeof(digits); ++i) {
- *--end = static_cast<char>(digits);
- digits >>= CHAR_BIT;
- }
- }
- return end;
- }
-
- template <int Digits>
- static BackwardIt DoFastIntToBufferBackward(
- T v, BackwardIt end, std::integral_constant<int, Digits>) {
- constexpr int kLogModulus = Digits - Digits / 2;
- constexpr T kModulus = Pow(static_cast<T>(10), kLogModulus);
- bool is_safe_to_use_division_trick = Digits <= 8;
- T quotient, remainder;
- if (is_safe_to_use_division_trick) {
- constexpr uint64_t kCoefficient =
- ComputePowerOf100DivisionCoefficient<uint64_t>(kLogModulus);
- quotient = (v * kCoefficient) >> (10 * kLogModulus);
- remainder = v - quotient * kModulus;
- } else {
- quotient = v / kModulus;
- remainder = v % kModulus;
- }
- end = DoFastIntToBufferBackward(remainder, end,
- std::integral_constant<int, kLogModulus>());
- return DoFastIntToBufferBackward(
- quotient, end, std::integral_constant<int, Digits - kLogModulus>());
- }
-};
-
-// Returns an iterator to the start of the suffix that was appended
-template <typename T, typename BackwardIt>
-std::enable_if_t<std::is_unsigned<T>::value, BackwardIt>
-DoFastIntToBufferBackward(T v, BackwardIt end, uint32_t digits) {
- using PromotedT = std::decay_t<decltype(+v)>;
- using Converter = FastUIntToStringConverter<PromotedT, BackwardIt>;
- (void)digits;
- return Converter().FastIntToBufferBackward(v, end);
+ uint32_t div08 = n / 100'000'000;
+ uint32_t mod08 = n % 100'000'000;
+ uint64_t bottom = PrepareEightDigits(mod08) + kEightZeroBytes;
+ out_str = EncodeHundred(div08, out_str);
+ little_endian::Store64(out_str, bottom);
+ return out_str + sizeof(bottom);
}
-template <typename T, typename BackwardIt>
-std::enable_if_t<std::is_signed<T>::value, BackwardIt>
-DoFastIntToBufferBackward(T v, BackwardIt end, uint32_t digits) {
- if (absl::numbers_internal::IsNegative(v)) {
- // Store the minus sign *before* we produce the number itself, not after.
- // This gets us a tail call.
- end[-static_cast<ptrdiff_t>(digits) - 1] = '-';
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* EncodeFullU64(uint64_t i,
+ char* buffer) {
+ if (i <= std::numeric_limits<uint32_t>::max()) {
+ return EncodeFullU32(static_cast<uint32_t>(i), buffer);
}
- return DoFastIntToBufferBackward(
- absl::numbers_internal::UnsignedAbsoluteValue(v), end, digits);
-}
-
-template <class T>
-std::enable_if_t<std::is_integral<T>::value, int>
-GetNumDigitsOrNegativeIfNegativeImpl(T v) {
- const auto /* either bool or std::false_type */ is_negative =
- absl::numbers_internal::IsNegative(v);
- const int digits = static_cast<int>(absl::numbers_internal::Base10Digits(
- absl::numbers_internal::UnsignedAbsoluteValue(v)));
- return is_negative ? ~digits : digits;
+ uint32_t mod08;
+ if (i < 1'0000'0000'0000'0000ull) {
+ uint32_t div08 = static_cast<uint32_t>(i / 100'000'000ull);
+ mod08 = static_cast<uint32_t>(i % 100'000'000ull);
+ buffer = EncodeFullU32(div08, buffer);
+ } else {
+ uint64_t div08 = i / 100'000'000ull;
+ mod08 = static_cast<uint32_t>(i % 100'000'000ull);
+ uint32_t div016 = static_cast<uint32_t>(div08 / 100'000'000ull);
+ uint32_t div08mod08 = static_cast<uint32_t>(div08 % 100'000'000ull);
+ uint64_t mid_result = PrepareEightDigits(div08mod08) + kEightZeroBytes;
+ buffer = EncodeTenThousand(div016, buffer);
+ little_endian::Store64(buffer, mid_result);
+ buffer += sizeof(mid_result);
+ }
+ uint64_t mod_result = PrepareEightDigits(mod08) + kEightZeroBytes;
+ little_endian::Store64(buffer, mod_result);
+ return buffer + sizeof(mod_result);
}
} // namespace
void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) {
- little_endian::Store16(
- buf, static_cast<uint16_t>(PrepareTwoDigits(i) + kTwoZeroBytes));
+ assert(i < 100);
+ uint32_t base = kTwoZeroBytes;
+ uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
+ uint32_t mod10 = i - 10u * div10;
+ base += div10 + (mod10 << 8);
+ little_endian::Store16(buf, static_cast<uint16_t>(base));
}
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
- uint32_t i, absl::Nonnull<char*> buffer) {
- const uint32_t digits = absl::numbers_internal::Base10Digits(i);
- buffer += digits;
- *buffer = '\0'; // We're going backward, so store this first
- FastIntToBufferBackward(i, buffer, digits);
- return buffer;
+ uint32_t n, absl::Nonnull<char*> out_str) {
+ out_str = EncodeFullU32(n, out_str);
+ *out_str = '\0';
+ return out_str;
}
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
int32_t i, absl::Nonnull<char*> buffer) {
- buffer += static_cast<int>(i < 0);
- uint32_t digits = absl::numbers_internal::Base10Digits(
- absl::numbers_internal::UnsignedAbsoluteValue(i));
- buffer += digits;
- *buffer = '\0'; // We're going backward, so store this first
- FastIntToBufferBackward(i, buffer, digits);
+ uint32_t u = static_cast<uint32_t>(i);
+ if (i < 0) {
+ *buffer++ = '-';
+ // We need to do the negation in modular (i.e., "unsigned")
+ // arithmetic; MSVC++ apparently warns for plain "-u", so
+ // we write the equivalent expression "0 - u" instead.
+ u = 0 - u;
+ }
+ buffer = EncodeFullU32(u, buffer);
+ *buffer = '\0';
return buffer;
}
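
The "0 - u" negation above also makes the INT32_MIN corner case well-defined; a hedged worked example (the Demo wrapper is hypothetical):

    #include <cstdint>
    #include <limits>

    void Demo() {
      // For i == INT32_MIN, u == 0x80000000u and 0 - u == 0x80000000u, which
      // is exactly |INT32_MIN| == 2147483648, a value plain `-i` could not
      // produce (negating INT32_MIN in int32_t is undefined behavior).
      char buf[absl::numbers_internal::kFastToBufferSize];
      char* end = absl::numbers_internal::FastIntToBuffer(
          std::numeric_limits<int32_t>::min(), buf);
      // buf now holds "-2147483648"; `end` points at the terminating '\0'.
      (void)end;
    }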
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
uint64_t i, absl::Nonnull<char*> buffer) {
- uint32_t digits = absl::numbers_internal::Base10Digits(i);
- buffer += digits;
- *buffer = '\0'; // We're going backward, so store this first
- FastIntToBufferBackward(i, buffer, digits);
+ buffer = EncodeFullU64(i, buffer);
+ *buffer = '\0';
return buffer;
}
absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
int64_t i, absl::Nonnull<char*> buffer) {
- buffer += static_cast<int>(i < 0);
- uint32_t digits = absl::numbers_internal::Base10Digits(
- absl::numbers_internal::UnsignedAbsoluteValue(i));
- buffer += digits;
- *buffer = '\0'; // We're going backward, so store this first
- FastIntToBufferBackward(i, buffer, digits);
+ uint64_t u = static_cast<uint64_t>(i);
+ if (i < 0) {
+ *buffer++ = '-';
+ // We need to do the negation in modular (i.e., "unsigned")
+ // arithmetic; MSVC++ apparently warns for plain "-u", so
+ // we write the equivalent expression "0 - u" instead.
+ u = 0 - u;
+ }
+ buffer = EncodeFullU64(u, buffer);
+ *buffer = '\0';
return buffer;
}
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
- uint32_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
- return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
- int32_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
- return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
- uint64_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
- return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
- int64_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
- return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(signed char v) {
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(unsigned char v) {
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(short v) { // NOLINT
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(
- unsigned short v) { // NOLINT
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(int v) {
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(unsigned int v) {
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(long v) { // NOLINT
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(
- unsigned long v) { // NOLINT
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(long long v) { // NOLINT
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(
- unsigned long long v) { // NOLINT
- return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-
// Given a 128-bit number expressed as a pair of uint64_t, high half first,
// return that number multiplied by the given 32-bit value. If the result is
// too large to fit in a 128-bit number, divide it by 2 until it fits.
diff --git a/contrib/restricted/abseil-cpp/absl/strings/numbers.h b/contrib/restricted/abseil-cpp/absl/strings/numbers.h
index ad4e66b634..739dbb28f9 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/numbers.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/numbers.h
@@ -32,7 +32,6 @@
#endif
#include <cstddef>
-#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <ctime>
@@ -40,12 +39,10 @@
#include <string>
#include <type_traits>
-#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
-#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
@@ -161,96 +158,6 @@ bool safe_strtou128_base(absl::string_view text,
static const int kFastToBufferSize = 32;
static const int kSixDigitsToBufferSize = 16;
-template <class T>
-std::enable_if_t<!std::is_unsigned<T>::value, bool> IsNegative(const T& v) {
- return v < T();
-}
-
-template <class T>
-std::enable_if_t<std::is_unsigned<T>::value, std::false_type> IsNegative(
- const T&) {
- // The integer is unsigned, so return a compile-time constant.
- // This can help the optimizer avoid having to prove bool to be false later.
- return std::false_type();
-}
-
-template <class T>
-std::enable_if_t<std::is_unsigned<std::decay_t<T>>::value, T&&>
-UnsignedAbsoluteValue(T&& v ABSL_ATTRIBUTE_LIFETIME_BOUND) {
- // The value is unsigned; just return the original.
- return std::forward<T>(v);
-}
-
-template <class T>
-ABSL_ATTRIBUTE_CONST_FUNCTION
- std::enable_if_t<!std::is_unsigned<T>::value, std::make_unsigned_t<T>>
- UnsignedAbsoluteValue(T v) {
- using U = std::make_unsigned_t<T>;
- return IsNegative(v) ? U() - static_cast<U>(v) : static_cast<U>(v);
-}
-
-// Returns the number of base-10 digits in the given number.
-// Note that this strictly counts digits. It does not count the sign.
-// The `initial_digits` parameter is the starting point, which is normally equal
-// to 1 because the number of digits in 0 is 1 (a special case).
-// However, callers may e.g. wish to change it to 2 to account for the sign.
-template <typename T>
-std::enable_if_t<std::is_unsigned<T>::value, uint32_t> Base10Digits(
- T v, const uint32_t initial_digits = 1) {
- uint32_t r = initial_digits;
- // If code size becomes an issue, the 'if' stage can be removed for a minor
- // performance loss.
- for (;;) {
- if (ABSL_PREDICT_TRUE(v < 10 * 10)) {
- r += (v >= 10);
- break;
- }
- if (ABSL_PREDICT_TRUE(v < 1000 * 10)) {
- r += (v >= 1000) + 2;
- break;
- }
- if (ABSL_PREDICT_TRUE(v < 100000 * 10)) {
- r += (v >= 100000) + 4;
- break;
- }
- r += 6;
- v = static_cast<T>(v / 1000000);
- }
- return r;
-}
-
-template <typename T>
-std::enable_if_t<std::is_signed<T>::value, uint32_t> Base10Digits(
- T v, uint32_t r = 1) {
- // Branchlessly add 1 to account for a minus sign.
- r += static_cast<uint32_t>(IsNegative(v));
- return Base10Digits(UnsignedAbsoluteValue(v), r);
-}
-
-// These functions return the number of base-10 digits, but multiplied by -1 if
-// the input itself is negative. This is handy and efficient for later usage,
-// since the bitwise complement of the result becomes equal to the number of
-// characters required.
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- signed char v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- unsigned char v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- short v); // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- unsigned short v); // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(int v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- unsigned int v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- long v); // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- unsigned long v); // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- long long v); // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
- unsigned long long v); // NOLINT
-
// Helper function for fast formatting of floating-point values.
// The result is the same as printf's "%g", a.k.a. "%.6g"; that is, six
// significant digits are returned, trailing zeros are removed, and numbers
@@ -259,18 +166,24 @@ ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
// Required buffer size is `kSixDigitsToBufferSize`.
size_t SixDigitsToBuffer(double d, absl::Nonnull<char*> buffer);
-// All of these functions take an output buffer
+// WARNING: These functions may write more characters than necessary, because
+// they are intended for speed. All functions take an output buffer
// as an argument and return a pointer to the last byte they wrote, which is the
// terminating '\0'. At most `kFastToBufferSize` bytes are written.
-absl::Nonnull<char*> FastIntToBuffer(int32_t i, absl::Nonnull<char*> buffer);
-absl::Nonnull<char*> FastIntToBuffer(uint32_t i, absl::Nonnull<char*> buffer);
-absl::Nonnull<char*> FastIntToBuffer(int64_t i, absl::Nonnull<char*> buffer);
-absl::Nonnull<char*> FastIntToBuffer(uint64_t i, absl::Nonnull<char*> buffer);
+absl::Nonnull<char*> FastIntToBuffer(int32_t i, absl::Nonnull<char*> buffer)
+ ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
+absl::Nonnull<char*> FastIntToBuffer(uint32_t n, absl::Nonnull<char*> out_str)
+ ABSL_INTERNAL_NEED_MIN_SIZE(out_str, kFastToBufferSize);
+absl::Nonnull<char*> FastIntToBuffer(int64_t i, absl::Nonnull<char*> buffer)
+ ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
+absl::Nonnull<char*> FastIntToBuffer(uint64_t i, absl::Nonnull<char*> buffer)
+ ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
// For enums and integer types that are not an exact match for the types above,
// use templates to call the appropriate one of the four overloads above.
template <typename int_type>
-absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer) {
+absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer)
+ ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize) {
static_assert(sizeof(i) <= 64 / 8,
"FastIntToBuffer works only with 64-bit-or-less integers.");
// TODO(jorg): This signed-ness check is used because it works correctly
@@ -294,58 +207,6 @@ absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer) {
}
}
-// These functions do NOT add any null-terminator.
-// They return a pointer to the beginning of the written string.
-// The digit counts provided must *exactly* match the number of base-10 digits
-// in the number, or the behavior is undefined.
-// (i.e. do NOT count the minus sign, or over- or under-count the digits.)
-absl::Nonnull<char*> FastIntToBufferBackward(int32_t i,
- absl::Nonnull<char*> buffer_end,
- uint32_t exact_digit_count);
-absl::Nonnull<char*> FastIntToBufferBackward(uint32_t i,
- absl::Nonnull<char*> buffer_end,
- uint32_t exact_digit_count);
-absl::Nonnull<char*> FastIntToBufferBackward(int64_t i,
- absl::Nonnull<char*> buffer_end,
- uint32_t exact_digit_count);
-absl::Nonnull<char*> FastIntToBufferBackward(uint64_t i,
- absl::Nonnull<char*> buffer_end,
- uint32_t exact_digit_count);
-
-// For enums and integer types that are not an exact match for the types above,
-// use templates to call the appropriate one of the four overloads above.
-template <typename int_type>
-absl::Nonnull<char*> FastIntToBufferBackward(int_type i,
- absl::Nonnull<char*> buffer_end,
- uint32_t exact_digit_count) {
- static_assert(
- sizeof(i) <= 64 / 8,
- "FastIntToBufferBackward works only with 64-bit-or-less integers.");
- // This signed-ness check is used because it works correctly
- // with enums, and it also serves to check that int_type is not a pointer.
- // If one day something like std::is_signed<enum E> works, switch to it.
- // These conditions are constexpr bools to suppress MSVC warning C4127.
- constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
- constexpr bool kUse64Bit = sizeof(i) > 32 / 8;
- if (kIsSigned) {
- if (kUse64Bit) {
- return FastIntToBufferBackward(static_cast<int64_t>(i), buffer_end,
- exact_digit_count);
- } else {
- return FastIntToBufferBackward(static_cast<int32_t>(i), buffer_end,
- exact_digit_count);
- }
- } else {
- if (kUse64Bit) {
- return FastIntToBufferBackward(static_cast<uint64_t>(i), buffer_end,
- exact_digit_count);
- } else {
- return FastIntToBufferBackward(static_cast<uint32_t>(i), buffer_end,
- exact_digit_count);
- }
- }
-}
-
// Implementation of SimpleAtoi, generalized to support arbitrary base (used
// with base different from 10 elsewhere in Abseil implementation).
template <typename int_type>
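A usage sketch for the annotated overloads (not from the diff itself; note FastIntToBuffer lives in the internal absl::numbers_internal namespace, as shown above):

    #include <cstdint>
    #include <iostream>
    #include "absl/strings/numbers.h"

    int main() {
      // The contract the new annotation expresses: the destination must
      // provide at least kFastToBufferSize writable bytes.
      char buf[absl::numbers_internal::kFastToBufferSize];
      // The return value points at the terminating '\0' that was written.
      char* end = absl::numbers_internal::FastIntToBuffer(int64_t{-123456}, buf);
      std::cout << buf << " (" << (end - buf) << " characters)\n";
    }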
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc b/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc
index 098ab18388..c51c13732d 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_cat.cc
@@ -20,19 +20,18 @@
#include <cstdint>
#include <cstring>
#include <initializer_list>
+#include <limits>
#include <string>
-#include <type_traits>
#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/nullability.h"
#include "absl/strings/internal/resize_uninitialized.h"
-#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
-
// ----------------------------------------------------------------------
// StrCat()
// This merges the given strings or integers, with no delimiter. This
@@ -43,7 +42,8 @@ ABSL_NAMESPACE_BEGIN
namespace {
// Append is merely a version of memcpy that returns the address of the byte
// after the area just overwritten.
-absl::Nonnull<char*> Append(absl::Nonnull<char*> out, const AlphaNum& x) {
+inline absl::Nonnull<char*> Append(absl::Nonnull<char*> out,
+ const AlphaNum& x) {
// memcpy is allowed to overwrite arbitrary memory, so doing this after the
// call would force an extra fetch of x.size().
char* after = out + x.size();
@@ -53,12 +53,23 @@ absl::Nonnull<char*> Append(absl::Nonnull<char*> out, const AlphaNum& x) {
return after;
}
+inline void STLStringAppendUninitializedAmortized(std::string* dest,
+ size_t to_append) {
+ strings_internal::AppendUninitializedTraits<std::string>::Append(dest,
+ to_append);
+}
} // namespace
std::string StrCat(const AlphaNum& a, const AlphaNum& b) {
std::string result;
- absl::strings_internal::STLStringResizeUninitialized(&result,
- a.size() + b.size());
+ // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in-memory strings to overflow a uint64_t.
+ constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+ const uint64_t result_size =
+ static_cast<uint64_t>(a.size()) + static_cast<uint64_t>(b.size());
+ ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
+ absl::strings_internal::STLStringResizeUninitialized(
+ &result, static_cast<size_t>(result_size));
char* const begin = &result[0];
char* out = begin;
out = Append(out, a);
@@ -69,8 +80,15 @@ std::string StrCat(const AlphaNum& a, const AlphaNum& b) {
std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) {
std::string result;
+ // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in-memory strings to overflow a uint64_t.
+ constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+ const uint64_t result_size = static_cast<uint64_t>(a.size()) +
+ static_cast<uint64_t>(b.size()) +
+ static_cast<uint64_t>(c.size());
+ ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
strings_internal::STLStringResizeUninitialized(
- &result, a.size() + b.size() + c.size());
+ &result, static_cast<size_t>(result_size));
char* const begin = &result[0];
char* out = begin;
out = Append(out, a);
@@ -83,8 +101,16 @@ std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) {
std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c,
const AlphaNum& d) {
std::string result;
+ // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in-memory strings to overflow a uint64_t.
+ constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+ const uint64_t result_size = static_cast<uint64_t>(a.size()) +
+ static_cast<uint64_t>(b.size()) +
+ static_cast<uint64_t>(c.size()) +
+ static_cast<uint64_t>(d.size());
+ ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
strings_internal::STLStringResizeUninitialized(
- &result, a.size() + b.size() + c.size() + d.size());
+ &result, static_cast<size_t>(result_size));
char* const begin = &result[0];
char* out = begin;
out = Append(out, a);
@@ -98,135 +124,18 @@ std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c,
namespace strings_internal {
// Do not call directly - these are not part of the public API.
-void STLStringAppendUninitializedAmortized(std::string* dest,
- size_t to_append) {
- strings_internal::AppendUninitializedTraits<std::string>::Append(dest,
- to_append);
-}
-
-template <typename Integer>
-std::enable_if_t<std::is_integral<Integer>::value, std::string> IntegerToString(
- Integer i) {
- std::string str;
- const auto /* either bool or std::false_type */ is_negative =
- absl::numbers_internal::IsNegative(i);
- const uint32_t digits = absl::numbers_internal::Base10Digits(
- absl::numbers_internal::UnsignedAbsoluteValue(i));
- absl::strings_internal::STLStringResizeUninitialized(
- &str, digits + static_cast<uint32_t>(is_negative));
- absl::numbers_internal::FastIntToBufferBackward(i, &str[str.size()], digits);
- return str;
-}
-
-template <>
-std::string IntegerToString(long i) { // NOLINT
- if (sizeof(i) <= sizeof(int)) {
- return IntegerToString(static_cast<int>(i));
- } else {
- return IntegerToString(static_cast<long long>(i)); // NOLINT
- }
-}
-
-template <>
-std::string IntegerToString(unsigned long i) { // NOLINT
- if (sizeof(i) <= sizeof(unsigned int)) {
- return IntegerToString(static_cast<unsigned int>(i));
- } else {
- return IntegerToString(static_cast<unsigned long long>(i)); // NOLINT
- }
-}
-
-template <typename Float>
-std::enable_if_t<std::is_floating_point<Float>::value, std::string>
-FloatToString(Float f) {
- std::string result;
- strings_internal::STLStringResizeUninitialized(
- &result, numbers_internal::kSixDigitsToBufferSize);
- char* start = &result[0];
- result.erase(numbers_internal::SixDigitsToBuffer(f, start));
- return result;
-}
-
-std::string SingleArgStrCat(int x) { return IntegerToString(x); }
-std::string SingleArgStrCat(unsigned int x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(long x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(unsigned long x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(long long x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(unsigned long long x) { return IntegerToString(x); }
-std::string SingleArgStrCat(float x) { return FloatToString(x); }
-std::string SingleArgStrCat(double x) { return FloatToString(x); }
-
-template <class Integer>
-std::enable_if_t<std::is_integral<Integer>::value, void> AppendIntegerToString(
- std::string& str, Integer i) {
- const auto /* either bool or std::false_type */ is_negative =
- absl::numbers_internal::IsNegative(i);
- const uint32_t digits = absl::numbers_internal::Base10Digits(
- absl::numbers_internal::UnsignedAbsoluteValue(i));
- absl::strings_internal::STLStringAppendUninitializedAmortized(
- &str, digits + static_cast<uint32_t>(is_negative));
- absl::numbers_internal::FastIntToBufferBackward(i, &str[str.size()], digits);
-}
-
-template <>
-void AppendIntegerToString(std::string& str, long i) { // NOLINT
- if (sizeof(i) <= sizeof(int)) {
- return AppendIntegerToString(str, static_cast<int>(i));
- } else {
- return AppendIntegerToString(str, static_cast<long long>(i)); // NOLINT
- }
-}
-
-template <>
-void AppendIntegerToString(std::string& str,
- unsigned long i) { // NOLINT
- if (sizeof(i) <= sizeof(unsigned int)) {
- return AppendIntegerToString(str, static_cast<unsigned int>(i));
- } else {
- return AppendIntegerToString(str,
- static_cast<unsigned long long>(i)); // NOLINT
- }
-}
-
-// `SingleArgStrAppend` overloads are defined here for the same reasons as with
-// `SingleArgStrCat` above.
-void SingleArgStrAppend(std::string& str, int x) {
- return AppendIntegerToString(str, x);
-}
-
-void SingleArgStrAppend(std::string& str, unsigned int x) {
- return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, long x) {
- return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, unsigned long x) {
- return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, long long x) {
- return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, unsigned long long x) {
- return AppendIntegerToString(str, x);
-}
-
std::string CatPieces(std::initializer_list<absl::string_view> pieces) {
std::string result;
- size_t total_size = 0;
- for (absl::string_view piece : pieces) total_size += piece.size();
- strings_internal::STLStringResizeUninitialized(&result, total_size);
+ // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in-memory strings to overflow a uint64_t.
+ constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+ uint64_t total_size = 0;
+ for (absl::string_view piece : pieces) {
+ total_size += piece.size();
+ }
+ ABSL_INTERNAL_CHECK(total_size <= kMaxSize, "size_t overflow");
+ strings_internal::STLStringResizeUninitialized(
+ &result, static_cast<size_t>(total_size));
char* const begin = &result[0];
char* out = begin;
@@ -258,7 +167,7 @@ void AppendPieces(absl::Nonnull<std::string*> dest,
ASSERT_NO_OVERLAP(*dest, piece);
to_append += piece.size();
}
- strings_internal::STLStringAppendUninitializedAmortized(dest, to_append);
+ STLStringAppendUninitializedAmortized(dest, to_append);
char* const begin = &(*dest)[0];
char* out = begin + old_size;
@@ -277,7 +186,7 @@ void AppendPieces(absl::Nonnull<std::string*> dest,
void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) {
ASSERT_NO_OVERLAP(*dest, a);
std::string::size_type old_size = dest->size();
- strings_internal::STLStringAppendUninitializedAmortized(dest, a.size());
+ STLStringAppendUninitializedAmortized(dest, a.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
@@ -289,8 +198,7 @@ void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
std::string::size_type old_size = dest->size();
- strings_internal::STLStringAppendUninitializedAmortized(dest,
- a.size() + b.size());
+ STLStringAppendUninitializedAmortized(dest, a.size() + b.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
@@ -304,8 +212,7 @@ void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
ASSERT_NO_OVERLAP(*dest, b);
ASSERT_NO_OVERLAP(*dest, c);
std::string::size_type old_size = dest->size();
- strings_internal::STLStringAppendUninitializedAmortized(
- dest, a.size() + b.size() + c.size());
+ STLStringAppendUninitializedAmortized(dest, a.size() + b.size() + c.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
@@ -321,7 +228,7 @@ void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
ASSERT_NO_OVERLAP(*dest, c);
ASSERT_NO_OVERLAP(*dest, d);
std::string::size_type old_size = dest->size();
- strings_internal::STLStringAppendUninitializedAmortized(
+ STLStringAppendUninitializedAmortized(
dest, a.size() + b.size() + c.size() + d.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
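Each of the checks added above follows the same pattern: widen every size to uint64_t, sum, and verify the total still fits in size_t before narrowing back. On 64-bit targets the check is nearly free; on 32-bit targets it catches concatenations whose total length would wrap a 32-bit size_t. A minimal sketch of the pattern (the helper name is hypothetical; the diff uses ABSL_INTERNAL_CHECK rather than abort):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <limits>

    // Hypothetical helper mirroring the overflow guards added above.
    size_t CheckedSum(size_t a, size_t b) {
      constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
      const uint64_t total = static_cast<uint64_t>(a) + static_cast<uint64_t>(b);
      if (total > kMaxSize) std::abort();  // ABSL_INTERNAL_CHECK in the diff
      return static_cast<size_t>(total);
    }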
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_cat.h b/contrib/restricted/abseil-cpp/absl/strings/str_cat.h
index 1b52a36f4c..b98adc0248 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_cat.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_cat.h
@@ -93,6 +93,8 @@
#include <cstddef>
#include <cstdint>
#include <cstring>
+#include <initializer_list>
+#include <limits>
#include <string>
#include <type_traits>
#include <utility>
@@ -312,6 +314,10 @@ class AlphaNum {
// No bool ctor -- bools convert to an integral type.
// A bool ctor would also convert incoming pointers (bletch).
+ // Prevent brace initialization
+ template <typename T>
+ AlphaNum(std::initializer_list<T>) = delete; // NOLINT(runtime/explicit)
+
AlphaNum(int x) // NOLINT(runtime/explicit)
: piece_(digits_, static_cast<size_t>(
numbers_internal::FastIntToBuffer(x, digits_) -
@@ -448,36 +454,77 @@ std::string CatPieces(std::initializer_list<absl::string_view> pieces);
void AppendPieces(absl::Nonnull<std::string*> dest,
std::initializer_list<absl::string_view> pieces);
-void STLStringAppendUninitializedAmortized(std::string* dest, size_t to_append);
+template <typename Integer>
+std::string IntegerToString(Integer i) {
+  // Any integer (signed/unsigned) up to 64 bits can be formatted into a buffer
+  // of 22 bytes (including the terminating '\0').
+ constexpr size_t kMaxDigits10 = 22;
+ std::string result;
+ strings_internal::STLStringResizeUninitialized(&result, kMaxDigits10);
+ char* start = &result[0];
+  // Note: this could be optimized to avoid writing the trailing '\0'.
+ char* end = numbers_internal::FastIntToBuffer(i, start);
+ auto size = static_cast<size_t>(end - start);
+ assert((size < result.size()) &&
+ "StrCat(Integer) does not fit into kMaxDigits10");
+ result.erase(size);
+ return result;
+}
+template <typename Float>
+std::string FloatToString(Float f) {
+ std::string result;
+ strings_internal::STLStringResizeUninitialized(
+ &result, numbers_internal::kSixDigitsToBufferSize);
+ char* start = &result[0];
+ result.erase(numbers_internal::SixDigitsToBuffer(f, start));
+ return result;
+}
// `SingleArgStrCat` overloads take built-in `int`, `long` and `long long` types
// (signed / unsigned) to avoid ambiguity on the call side. If we used int32_t
// and int64_t, then at least one of the three (`int` / `long` / `long long`)
// would have been ambiguous when passed to `SingleArgStrCat`.
-std::string SingleArgStrCat(int x);
-std::string SingleArgStrCat(unsigned int x);
-std::string SingleArgStrCat(long x); // NOLINT
-std::string SingleArgStrCat(unsigned long x); // NOLINT
-std::string SingleArgStrCat(long long x); // NOLINT
-std::string SingleArgStrCat(unsigned long long x); // NOLINT
-std::string SingleArgStrCat(float x);
-std::string SingleArgStrCat(double x);
-
-// `SingleArgStrAppend` overloads are defined here for the same reasons as with
-// `SingleArgStrCat` above.
-void SingleArgStrAppend(std::string& str, int x);
-void SingleArgStrAppend(std::string& str, unsigned int x);
-void SingleArgStrAppend(std::string& str, long x); // NOLINT
-void SingleArgStrAppend(std::string& str, unsigned long x); // NOLINT
-void SingleArgStrAppend(std::string& str, long long x); // NOLINT
-void SingleArgStrAppend(std::string& str, unsigned long long x); // NOLINT
-
-template <typename T,
- typename = std::enable_if_t<std::is_arithmetic<T>::value &&
- !std::is_same<T, char>::value &&
- !std::is_same<T, bool>::value>>
+inline std::string SingleArgStrCat(int x) { return IntegerToString(x); }
+inline std::string SingleArgStrCat(unsigned int x) {
+ return IntegerToString(x);
+}
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(long x) { return IntegerToString(x); }
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(unsigned long x) {
+ return IntegerToString(x);
+}
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(long long x) { return IntegerToString(x); }
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(unsigned long long x) {
+ return IntegerToString(x);
+}
+inline std::string SingleArgStrCat(float x) { return FloatToString(x); }
+inline std::string SingleArgStrCat(double x) { return FloatToString(x); }
+
+// As of September 2023, the SingleArgStrCat() optimization is only enabled for
+// libc++. The reasons for this are:
+// 1) The SSO size for libc++ is 23, while libstdc++ and MSSTL have an SSO size
+// of 15. Since IntegerToString unconditionally resizes the string to 22 bytes,
+// this causes both libstdc++ and MSSTL to allocate.
+// 2) strings_internal::STLStringResizeUninitialized() only has an
+// implementation that avoids initialization when using libc++. This isn't as
+// relevant as (1), and the cost should be benchmarked if (1) ever changes on
+// libstdc++ or MSSTL.
+#ifdef _LIBCPP_VERSION
+#define ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE true
+#else
+#define ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE false
+#endif
+
+template <typename T, typename = std::enable_if_t<
+ ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE &&
+ std::is_arithmetic<T>{} && !std::is_same<T, char>{}>>
using EnableIfFastCase = T;
+#undef ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE
+
} // namespace strings_internal
ABSL_MUST_USE_RESULT inline std::string StrCat() { return std::string(); }
@@ -553,67 +600,6 @@ inline void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
static_cast<const AlphaNum&>(args).Piece()...});
}
-template <class String, class T>
-std::enable_if_t<
- std::is_integral<absl::strings_internal::EnableIfFastCase<T>>::value, void>
-StrAppend(absl::Nonnull<String*> result, T i) {
- return absl::strings_internal::SingleArgStrAppend(*result, i);
-}
-
-// This overload is only selected if all the parameters are numbers that can be
-// handled quickly.
-// Later we can look into how we can extend this to more general argument
-// mixtures without bloating codegen too much, or copying unnecessarily.
-template <typename String, typename... T>
-std::enable_if_t<
- (sizeof...(T) > 1),
- std::common_type_t<std::conditional_t<
- true, void, absl::strings_internal::EnableIfFastCase<T>>...>>
-StrAppend(absl::Nonnull<String*> str, T... args) {
- // Do not add unnecessary variables, logic, or even "free" lambdas here.
- // They can add overhead for the compiler and/or at run time.
- // Furthermore, assume this function will be inlined.
- // This function is carefully tailored to be able to be largely optimized away
- // so that it becomes near-equivalent to the caller handling each argument
- // individually while minimizing register pressure, so that the compiler
- // can inline it with minimal overhead.
-
- // First, calculate the total length, so we can perform just a single resize.
- // Save all the lengths for later.
- size_t total_length = 0;
- const ptrdiff_t lengths[] = {
- absl::numbers_internal::GetNumDigitsOrNegativeIfNegative(args)...};
- for (const ptrdiff_t possibly_negative_length : lengths) {
- // Lengths are negative for negative numbers. Keep them for later use, but
- // take their absolute values for calculating total lengths;
- total_length += possibly_negative_length < 0
- ? static_cast<size_t>(-possibly_negative_length)
- : static_cast<size_t>(possibly_negative_length);
- }
-
- // Now reserve space for all the arguments.
- const size_t old_size = str->size();
- absl::strings_internal::STLStringAppendUninitializedAmortized(str,
- total_length);
-
- // Finally, output each argument one-by-one, from left to right.
- size_t i = 0; // The current argument we're processing
- ptrdiff_t n; // The length of the current argument
- typename String::pointer pos = &(*str)[old_size];
- using SomeTrivialEmptyType = std::false_type;
- const SomeTrivialEmptyType dummy;
- // Ugly code due to the lack of C++17 fold expressions
- const SomeTrivialEmptyType dummies[] = {
- (/* Comma expressions are poor man's C++17 fold expression for C++14 */
- (void)(n = lengths[i]),
- (void)(n < 0 ? (void)(*pos++ = '-'), (n = ~n) : 0),
- (void)absl::numbers_internal::FastIntToBufferBackward(
- absl::numbers_internal::UnsignedAbsoluteValue(std::move(args)),
- pos += n, static_cast<uint32_t>(n)),
- (void)++i, dummy)...};
- (void)dummies; // Remove & migrate to fold expressions in C++17
-}
-
// Helper function for the future StrCat default floating-point format, %.6g
// This is fast.
inline strings_internal::AlphaNumBuffer<
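A sketch of what the relocated fast path means for callers (the API is unchanged; on libc++ the 22-byte resize in IntegerToString stays within the 23-byte SSO noted above, while other standard libraries take the generic AlphaNum path):

    #include <string>
    #include "absl/strings/str_cat.h"

    int main() {
      std::string s = absl::StrCat(1234567890);  // integer fast path on libc++
      std::string t = absl::StrCat(2.5);         // floats go via SixDigitsToBuffer
      (void)s;
      (void)t;
    }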
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_format.h b/contrib/restricted/abseil-cpp/absl/strings/str_format.h
index 66b6af5854..76904d3228 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_format.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_format.h
@@ -181,7 +181,7 @@ class FormatCountCapture {
// For a `FormatSpec` to be valid at compile-time, it must be provided as
// either:
//
-// * A `constexpr` literal or `absl::string_view`, which is how it most often
+// * A `constexpr` literal or `absl::string_view`, which is how it is most often
// used.
// * A `ParsedFormat` instantiation, which ensures the format string is
// valid before use. (See below.)
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_join.h b/contrib/restricted/abseil-cpp/absl/strings/str_join.h
index 6a92c0f53a..8d7bc6ba79 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_join.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_join.h
@@ -247,12 +247,20 @@ std::string StrJoin(const Range& range, absl::string_view separator,
return strings_internal::JoinRange(range, separator, fmt);
}
-template <typename T, typename Formatter>
+template <typename T, typename Formatter,
+ typename = typename std::enable_if<
+ !std::is_convertible<T, absl::string_view>::value>::type>
std::string StrJoin(std::initializer_list<T> il, absl::string_view separator,
Formatter&& fmt) {
return strings_internal::JoinRange(il, separator, fmt);
}
+template <typename Formatter>
+inline std::string StrJoin(std::initializer_list<absl::string_view> il,
+ absl::string_view separator, Formatter&& fmt) {
+ return strings_internal::JoinRange(il, separator, fmt);
+}
+
template <typename... T, typename Formatter>
std::string StrJoin(const std::tuple<T...>& value, absl::string_view separator,
Formatter&& fmt) {
@@ -269,16 +277,22 @@ std::string StrJoin(const Range& range, absl::string_view separator) {
return strings_internal::JoinRange(range, separator);
}
-template <typename T>
-std::string StrJoin(std::initializer_list<T> il,
- absl::string_view separator) {
+template <typename T, typename = typename std::enable_if<!std::is_convertible<
+ T, absl::string_view>::value>::type>
+std::string StrJoin(std::initializer_list<T> il, absl::string_view separator) {
+ return strings_internal::JoinRange(il, separator);
+}
+
+inline std::string StrJoin(std::initializer_list<absl::string_view> il,
+ absl::string_view separator) {
return strings_internal::JoinRange(il, separator);
}
template <typename... T>
std::string StrJoin(const std::tuple<T...>& value,
absl::string_view separator) {
- return strings_internal::JoinAlgorithm(value, separator, AlphaNumFormatter());
+ return strings_internal::JoinTuple(value, separator,
+ std::index_sequence_for<T...>{});
}
ABSL_NAMESPACE_END
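The point of the new non-template initializer_list<absl::string_view> overloads, sketched: deducing initializer_list<T> requires one common element type T, while the new overloads only require that each element convert to string_view, so mixed braced lists now work:

    #include <iostream>
    #include <string>
    #include "absl/strings/str_join.h"

    int main() {
      std::string b = "b";
      // Elements of different types: deduction of initializer_list<T> would
      // fail, but each element converts to absl::string_view.
      std::cout << absl::StrJoin({"a", absl::string_view(b), "c"}, "-")
                << "\n";  // prints a-b-c
    }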
diff --git a/contrib/restricted/abseil-cpp/absl/strings/str_split.h b/contrib/restricted/abseil-cpp/absl/strings/str_split.h
index 87540278ad..ba176fca3c 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/str_split.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/str_split.h
@@ -456,7 +456,7 @@ using EnableSplitIfString =
// // Stores results in a std::set<std::string>, which also performs
// // de-duplication and orders the elements in ascending order.
// std::set<std::string> a = absl::StrSplit("b,a,c,a,b", ',');
-// // v[0] == "a", v[1] == "b", v[2] = "c"
+// // a[0] == "a", a[1] == "b", a[2] == "c"
//
// // `StrSplit()` can be used within a range-based for loop, in which case
// // each element will be of type `absl::string_view`.
@@ -544,7 +544,7 @@ StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d,
typename strings_internal::SelectDelimiter<Delimiter>::type;
return strings_internal::Splitter<DelimiterType, Predicate,
absl::string_view>(
- text.value(), DelimiterType(d), std::move(p));
+ text.value(), DelimiterType(std::move(d)), std::move(p));
}
template <typename Delimiter, typename Predicate, typename StringType,
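For reference, a sketch of the usage the corrected comment describes:

    #include <set>
    #include <string>
    #include "absl/strings/str_split.h"

    int main() {
      // Collecting into a std::set de-duplicates and sorts the pieces.
      std::set<std::string> a = absl::StrSplit("b,a,c,a,b", ',');
      // a == {"a", "b", "c"}
    }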
diff --git a/contrib/restricted/abseil-cpp/absl/strings/string_view.h b/contrib/restricted/abseil-cpp/absl/strings/string_view.h
index 04ca0a38ae..ff7600144a 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/string_view.h
+++ b/contrib/restricted/abseil-cpp/absl/strings/string_view.h
@@ -159,7 +159,7 @@ ABSL_NAMESPACE_BEGIN
//
// absl::string_view() == absl::string_view("", 0)
// absl::string_view(nullptr, 0) == absl::string_view("abcdef"+6, 0)
-class string_view {
+class ABSL_INTERNAL_ATTRIBUTE_VIEW string_view {
public:
using traits_type = std::char_traits<char>;
using value_type = char;
@@ -173,6 +173,7 @@ class string_view {
using reverse_iterator = const_reverse_iterator;
using size_type = size_t;
using difference_type = std::ptrdiff_t;
+ using absl_internal_is_view = std::true_type;
static constexpr size_type npos = static_cast<size_type>(-1);
@@ -670,7 +671,7 @@ class string_view {
}
static constexpr size_type StrlenInternal(absl::Nonnull<const char*> str) {
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && !defined(__clang__)
+#if defined(_MSC_VER) && !defined(__clang__)
// MSVC 2017+ can evaluate this at compile-time.
const char* begin = str;
while (*str != '\0') ++str;
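With the version guard relaxed to any non-clang MSVC, constant evaluation of StrlenInternal keeps working across the major compilers; a sketch:

    #include "absl/strings/string_view.h"

    constexpr absl::string_view kName("abseil");  // length found at compile time
    static_assert(kName.size() == 6, "StrlenInternal ran in a constant expression");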
diff --git a/contrib/restricted/abseil-cpp/absl/strings/substitute.cc b/contrib/restricted/abseil-cpp/absl/strings/substitute.cc
index dd32c75f69..a71f565a12 100644
--- a/contrib/restricted/abseil-cpp/absl/strings/substitute.cc
+++ b/contrib/restricted/abseil-cpp/absl/strings/substitute.cc
@@ -18,6 +18,7 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <limits>
#include <string>
#include "absl/base/config.h"
@@ -84,6 +85,9 @@ void SubstituteAndAppendArray(
// Build the string.
size_t original_size = output->size();
+ ABSL_INTERNAL_CHECK(
+ size <= std::numeric_limits<size_t>::max() - original_size,
+ "size_t overflow");
strings_internal::STLStringResizeUninitializedAmortized(output,
original_size + size);
char* target = &(*output)[original_size];
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
index 39b18482f3..129067c151 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
@@ -211,7 +211,7 @@ class NodeSet {
Vec<int32_t> table_;
uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
- static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }
+ static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a) * 41; }
// Return index for storing v. May return an empty index or deleted index
uint32_t FindIndex(int32_t v) const {
@@ -333,7 +333,7 @@ class PointerMap {
private:
// Number of buckets in hash table for pointer lookups.
- static constexpr uint32_t kHashTableSize = 8171; // should be prime
+ static constexpr uint32_t kHashTableSize = 262139; // should be prime
const Vec<Node*>* nodes_;
std::array<int32_t, kHashTableSize> table_;
@@ -365,6 +365,14 @@ static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
return (n->version == NodeVersion(id)) ? n : nullptr;
}
+void GraphCycles::TestOnlyAddNodes(uint32_t n) {
+ uint32_t old_size = rep_->nodes_.size();
+ rep_->nodes_.resize(n);
+ for (auto i = old_size; i < n; ++i) {
+ rep_->nodes_[i] = nullptr;
+ }
+}
+
GraphCycles::GraphCycles() {
InitArenaIfNecessary();
rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
@@ -373,6 +381,7 @@ GraphCycles::GraphCycles() {
GraphCycles::~GraphCycles() {
for (auto* node : rep_->nodes_) {
+ if (node == nullptr) { continue; }
node->Node::~Node();
base_internal::LowLevelAlloc::Free(node);
}
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
index ceba33e4de..08f304bce1 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
@@ -126,6 +126,11 @@ class GraphCycles {
// Expensive: should only be called from graphcycles_test.cc.
bool CheckInvariants() const;
+ // Test-only method to add more nodes. The nodes will not be valid, and this
+ // method should only be used to test the behavior of the graph when it is
+ // very full.
+ void TestOnlyAddNodes(uint32_t n);
+
// ----------------------------------------------------
struct Rep;
private:
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
index d53a22bbd9..be3f1f56bb 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
@@ -148,7 +148,7 @@ struct SynchWaitParams;
//
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
-class ABSL_LOCKABLE Mutex {
+class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
public:
// Creates a `Mutex` that is not held by anyone. This constructor is
// typically used for Mutexes allocated on the heap or the stack.
@@ -190,7 +190,7 @@ class ABSL_LOCKABLE Mutex {
// If the mutex can be acquired without blocking, does so exclusively and
// returns `true`. Otherwise, returns `false`. Returns `true` with high
// probability if the `Mutex` was free.
- bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+ ABSL_MUST_USE_RESULT bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
// Mutex::AssertHeld()
//
@@ -255,7 +255,7 @@ class ABSL_LOCKABLE Mutex {
// If the mutex can be acquired without blocking, acquires this mutex for
// shared access and returns `true`. Otherwise, returns `false`. Returns
// `true` with high probability if the `Mutex` was free or shared.
- bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
+ ABSL_MUST_USE_RESULT bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
// Mutex::AssertReaderHeld()
//
@@ -281,7 +281,8 @@ class ABSL_LOCKABLE Mutex {
void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
- bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ ABSL_MUST_USE_RESULT bool WriterTryLock()
+ ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
return this->TryLock();
}
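The effect of the new ABSL_MUST_USE_RESULT annotations, sketched: discarding a TryLock() result now draws a compiler warning, since a caller that ignores it cannot know whether the mutex was acquired:

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;

    void MaybeDoWork() {
      // mu.TryLock();  // would now warn: unused result
      if (mu.TryLock()) {
        // ... critical section ...
        mu.Unlock();
      }
    }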
diff --git a/contrib/restricted/abseil-cpp/absl/time/civil_time.h b/contrib/restricted/abseil-cpp/absl/time/civil_time.h
index 3e904a113d..d198ebaec0 100644
--- a/contrib/restricted/abseil-cpp/absl/time/civil_time.h
+++ b/contrib/restricted/abseil-cpp/absl/time/civil_time.h
@@ -55,7 +55,7 @@
// Example:
//
// // Construct a civil-time object for a specific day
-// const absl::CivilDay cd(1969, 07, 20);
+// const absl::CivilDay cd(1969, 7, 20);
//
// // Construct a civil-time object for a specific second
// const absl::CivilSecond cd(2018, 8, 1, 12, 0, 1);
@@ -65,7 +65,7 @@
// Example:
//
// // Valid in C++14
-// constexpr absl::CivilDay cd(1969, 07, 20);
+// constexpr absl::CivilDay cd(1969, 7, 20);
#ifndef ABSL_TIME_CIVIL_TIME_H_
#define ABSL_TIME_CIVIL_TIME_H_
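The doc fix above is not only stylistic: a leading zero makes an integer literal octal, so `07` happens to equal 7, but a month written `08` or `09` would not compile at all. A sketch:

    #include "absl/time/civil_time.h"

    constexpr absl::CivilDay ok(1969, 7, 20);       // July 20, 1969
    // constexpr absl::CivilDay bad(1969, 09, 20);  // error: invalid octal digit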
diff --git a/contrib/restricted/abseil-cpp/absl/time/clock.cc b/contrib/restricted/abseil-cpp/absl/time/clock.cc
index aa74367b8a..ecd539e5ca 100644
--- a/contrib/restricted/abseil-cpp/absl/time/clock.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/clock.cc
@@ -88,11 +88,25 @@ ABSL_NAMESPACE_END
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
+
+// On some processors, consecutive reads of the cycle counter may yield the
+// same value (weakly-increasing). In x86-64's debug mode, clear the least
+// significant bits of the returned value to discourage code from depending
+// on a strictly-increasing Now() value. (Elsewhere the mask below is all
+// ones, a no-op.)
+#if !defined(NDEBUG) && defined(__x86_64__)
+constexpr int64_t kCycleClockNowMask = ~int64_t{0xff};
+#else
+constexpr int64_t kCycleClockNowMask = ~int64_t{0};
+#endif
+
// This is a friend wrapper around UnscaledCycleClock::Now()
// (needed to access UnscaledCycleClock).
class UnscaledCycleClockWrapperForGetCurrentTime {
public:
- static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+ static int64_t Now() {
+ return base_internal::UnscaledCycleClock::Now() & kCycleClockNowMask;
+ }
};
} // namespace time_internal
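A small illustration of the new debug-mode mask: clearing the low 8 bits makes nearby cycle-counter reads coincide, flushing out code that assumes Now() is strictly increasing:

    #include <cstdint>

    constexpr int64_t kMask = ~int64_t{0xff};
    static_assert((int64_t{0x12345} & kMask) == int64_t{0x12300},
                  "reads within 256 cycles collapse to the same value");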
diff --git a/contrib/restricted/abseil-cpp/absl/time/duration.cc b/contrib/restricted/abseil-cpp/absl/time/duration.cc
index bdb16e2109..8d0b66f825 100644
--- a/contrib/restricted/abseil-cpp/absl/time/duration.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/duration.cc
@@ -219,7 +219,7 @@ struct SafeMultiply {
? static_cast<uint128>(Uint128Low64(a) * Uint128Low64(b))
: a * b;
}
- return b == 0 ? b : (a > kuint128max / b) ? kuint128max : a * b;
+ return b == 0 ? b : (a > Uint128Max() / b) ? Uint128Max() : a * b;
}
};
@@ -280,33 +280,35 @@ inline bool IDivFastPath(const Duration num, const Duration den, int64_t* q,
int64_t den_hi = time_internal::GetRepHi(den);
uint32_t den_lo = time_internal::GetRepLo(den);
- if (den_hi == 0 && den_lo == kTicksPerNanosecond) {
- // Dividing by 1ns
- if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000000) {
- *q = num_hi * 1000000000 + num_lo / kTicksPerNanosecond;
- *rem = time_internal::MakeDuration(0, num_lo % den_lo);
- return true;
- }
- } else if (den_hi == 0 && den_lo == 100 * kTicksPerNanosecond) {
- // Dividing by 100ns (common when converting to Universal time)
- if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 10000000) {
- *q = num_hi * 10000000 + num_lo / (100 * kTicksPerNanosecond);
- *rem = time_internal::MakeDuration(0, num_lo % den_lo);
- return true;
- }
- } else if (den_hi == 0 && den_lo == 1000 * kTicksPerNanosecond) {
- // Dividing by 1us
- if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000) {
- *q = num_hi * 1000000 + num_lo / (1000 * kTicksPerNanosecond);
- *rem = time_internal::MakeDuration(0, num_lo % den_lo);
- return true;
- }
- } else if (den_hi == 0 && den_lo == 1000000 * kTicksPerNanosecond) {
- // Dividing by 1ms
- if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000) {
- *q = num_hi * 1000 + num_lo / (1000000 * kTicksPerNanosecond);
- *rem = time_internal::MakeDuration(0, num_lo % den_lo);
- return true;
+ if (den_hi == 0) {
+ if (den_lo == kTicksPerNanosecond) {
+ // Dividing by 1ns
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000000) {
+ *q = num_hi * 1000000000 + num_lo / kTicksPerNanosecond;
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_lo == 100 * kTicksPerNanosecond) {
+ // Dividing by 100ns (common when converting to Universal time)
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 10000000) {
+ *q = num_hi * 10000000 + num_lo / (100 * kTicksPerNanosecond);
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_lo == 1000 * kTicksPerNanosecond) {
+ // Dividing by 1us
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000) {
+ *q = num_hi * 1000000 + num_lo / (1000 * kTicksPerNanosecond);
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
+ } else if (den_lo == 1000000 * kTicksPerNanosecond) {
+ // Dividing by 1ms
+ if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000) {
+ *q = num_hi * 1000 + num_lo / (1000000 * kTicksPerNanosecond);
+ *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+ return true;
+ }
}
} else if (den_hi > 0 && den_lo == 0) {
// Dividing by positive multiple of 1s
@@ -342,19 +344,10 @@ inline bool IDivFastPath(const Duration num, const Duration den, int64_t* q,
} // namespace
-namespace time_internal {
+namespace {
-// The 'satq' argument indicates whether the quotient should saturate at the
-// bounds of int64_t. If it does saturate, the difference will spill over to
-// the remainder. If it does not saturate, the remainder remain accurate,
-// but the returned quotient will over/underflow int64_t and should not be used.
-int64_t IDivDuration(bool satq, const Duration num, const Duration den,
+int64_t IDivSlowPath(bool satq, const Duration num, const Duration den,
Duration* rem) {
- int64_t q = 0;
- if (IDivFastPath(num, den, &q, rem)) {
- return q;
- }
-
const bool num_neg = num < ZeroDuration();
const bool den_neg = den < ZeroDuration();
const bool quotient_neg = num_neg != den_neg;
@@ -391,7 +384,27 @@ int64_t IDivDuration(bool satq, const Duration num, const Duration den,
return -static_cast<int64_t>(Uint128Low64(quotient128 - 1) & kint64max) - 1;
}
-} // namespace time_internal
+// The 'satq' argument indicates whether the quotient should saturate at the
+// bounds of int64_t. If it does saturate, the difference will spill over to
+// the remainder. If it does not saturate, the remainder remain accurate,
+// but the returned quotient will over/underflow int64_t and should not be used.
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int64_t IDivDurationImpl(bool satq,
+ const Duration num,
+ const Duration den,
+ Duration* rem) {
+ int64_t q = 0;
+ if (IDivFastPath(num, den, &q, rem)) {
+ return q;
+ }
+ return IDivSlowPath(satq, num, den, rem);
+}
+
+} // namespace
+
+int64_t IDivDuration(Duration num, Duration den, Duration* rem) {
+ return IDivDurationImpl(true, num, den,
+ rem); // trunc towards zero
+}
//
// Additive operators.
@@ -475,7 +488,7 @@ Duration& Duration::operator/=(double r) {
}
Duration& Duration::operator%=(Duration rhs) {
- time_internal::IDivDuration(false, *this, rhs, this);
+ IDivDurationImpl(false, *this, rhs, this);
return *this;
}
@@ -501,9 +514,7 @@ double FDivDuration(Duration num, Duration den) {
// Trunc/Floor/Ceil.
//
-Duration Trunc(Duration d, Duration unit) {
- return d - (d % unit);
-}
+Duration Trunc(Duration d, Duration unit) { return d - (d % unit); }
Duration Floor(const Duration d, const Duration unit) {
const absl::Duration td = Trunc(d, unit);
@@ -591,15 +602,9 @@ double ToDoubleMicroseconds(Duration d) {
double ToDoubleMilliseconds(Duration d) {
return FDivDuration(d, Milliseconds(1));
}
-double ToDoubleSeconds(Duration d) {
- return FDivDuration(d, Seconds(1));
-}
-double ToDoubleMinutes(Duration d) {
- return FDivDuration(d, Minutes(1));
-}
-double ToDoubleHours(Duration d) {
- return FDivDuration(d, Hours(1));
-}
+double ToDoubleSeconds(Duration d) { return FDivDuration(d, Seconds(1)); }
+double ToDoubleMinutes(Duration d) { return FDivDuration(d, Minutes(1)); }
+double ToDoubleHours(Duration d) { return FDivDuration(d, Hours(1)); }
timespec ToTimespec(Duration d) {
timespec ts;
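Caller-visible behavior is unchanged by the fast-path/slow-path split above; the public IDivDuration and the division operators all funnel into IDivDurationImpl. A usage sketch:

    #include <cstdint>
    #include "absl/time/time.h"

    void Example() {
      absl::Duration rem;
      // Truncates towards zero: 60s / 7s == 8, remainder 4s.
      int64_t q = absl::IDivDuration(absl::Minutes(1), absl::Seconds(7), &rem);
      int64_t r = absl::Minutes(1) / absl::Seconds(7);         // also 8
      absl::Duration m = absl::Minutes(1) % absl::Seconds(7);  // 4s
      (void)q; (void)r; (void)m;
    }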
diff --git a/contrib/restricted/abseil-cpp/absl/time/format.cc b/contrib/restricted/abseil-cpp/absl/time/format.cc
index 15a26b14f7..bd06f8fb35 100644
--- a/contrib/restricted/abseil-cpp/absl/time/format.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/format.cc
@@ -16,6 +16,7 @@
#include <cctype>
#include <cstdint>
+#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
@@ -136,7 +137,7 @@ bool ParseTime(absl::string_view format, absl::string_view input,
if (b) {
*time = Join(parts);
} else if (err != nullptr) {
- *err = error;
+ *err = std::move(error);
}
return b;
}
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
index d01461222e..b509402783 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#if defined(_WIN32) || defined(_WIN64)
+#if !defined(_CRT_SECURE_NO_WARNINGS) && defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
index f37c5b3042..e0d773b831 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -17,9 +17,6 @@
#if defined(__ANDROID__)
#include <sys/system_properties.h>
-#if defined(__ANDROID_API__) && __ANDROID_API__ >= 21
-#include <dlfcn.h>
-#endif
#endif
#if defined(__APPLE__)
@@ -66,32 +63,6 @@ namespace time_internal {
namespace cctz {
namespace {
-#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
-// Android 'L' removes __system_property_get() from the NDK, however
-// it is still a hidden symbol in libc so we use dlsym() to access it.
-// See Chromium's base/sys_info_android.cc for a similar example.
-
-using property_get_func = int (*)(const char*, char*);
-
-property_get_func LoadSystemPropertyGet() {
- int flag = RTLD_LAZY | RTLD_GLOBAL;
-#if defined(RTLD_NOLOAD)
- flag |= RTLD_NOLOAD; // libc.so should already be resident
-#endif
- if (void* handle = dlopen("libc.so", flag)) {
- void* sym = dlsym(handle, "__system_property_get");
- dlclose(handle);
- return reinterpret_cast<property_get_func>(sym);
- }
- return nullptr;
-}
-
-int __system_property_get(const char* name, char* value) {
- static property_get_func system_property_get = LoadSystemPropertyGet();
- return system_property_get ? system_property_get(name, value) : -1;
-}
-#endif
-
#if defined(USE_WIN32_LOCAL_TIME_ZONE)
// Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
// local time zone. Returns an empty vector in case of an error.
diff --git a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
index 114026d066..2be3bb8d98 100644
--- a/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
+++ b/contrib/restricted/abseil-cpp/absl/time/internal/cctz/src/tzfile.h
@@ -77,11 +77,11 @@ struct tzhead {
** time uses 8 rather than 4 chars,
** then a POSIX-TZ-environment-variable-style string for use in handling
** instants after the last transition time stored in the file
-** (with nothing between the newlines if there is no POSIX representation for
-** such instants).
+** (with nothing between the newlines if there is no POSIX.1-2017
+** representation for such instants).
**
** If tz_version is '3' or greater, the above is extended as follows.
-** First, the POSIX TZ string's hour offset may range from -167
+** First, the TZ string's hour offset may range from -167
** through 167 as compared to the POSIX-required 0 through 24.
** Second, its DST start time may be January 1 at 00:00 and its stop
** time December 31 at 24:00 plus the difference between DST and
diff --git a/contrib/restricted/abseil-cpp/absl/time/time.h b/contrib/restricted/abseil-cpp/absl/time/time.h
index 37580805e2..15edbb4a66 100644
--- a/contrib/restricted/abseil-cpp/absl/time/time.h
+++ b/contrib/restricted/abseil-cpp/absl/time/time.h
@@ -75,15 +75,22 @@
struct timeval;
#endif
#include <chrono> // NOLINT(build/c++11)
+
+#ifdef __cpp_lib_three_way_comparison
+#include <compare>
+#endif // __cpp_lib_three_way_comparison
+
#include <cmath>
#include <cstdint>
#include <ctime>
#include <limits>
#include <ostream>
+#include <ratio> // NOLINT(build/c++11)
#include <string>
#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
@@ -98,7 +105,6 @@ class Time; // Defined below
class TimeZone; // Defined below
namespace time_internal {
-int64_t IDivDuration(bool satq, Duration num, Duration den, Duration* rem);
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixDuration(Duration d);
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ToUnixDuration(Time t);
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d);
@@ -306,6 +312,14 @@ class Duration {
};
// Relational Operators
+
+#ifdef __cpp_lib_three_way_comparison
+
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr std::strong_ordering operator<=>(
+ Duration lhs, Duration rhs);
+
+#endif // __cpp_lib_three_way_comparison
+
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs,
Duration rhs);
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>(Duration lhs,
@@ -338,30 +352,6 @@ ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator-(Duration lhs,
return lhs -= rhs;
}
-// Multiplicative Operators
-// Integer operands must be representable as int64_t.
-template <typename T>
-ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(Duration lhs, T rhs) {
- return lhs *= rhs;
-}
-template <typename T>
-ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(T lhs, Duration rhs) {
- return rhs *= lhs;
-}
-template <typename T>
-ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator/(Duration lhs, T rhs) {
- return lhs /= rhs;
-}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t operator/(Duration lhs,
- Duration rhs) {
- return time_internal::IDivDuration(true, lhs, rhs,
- &lhs); // trunc towards zero
-}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs,
- Duration rhs) {
- return lhs %= rhs;
-}
-
// IDivDuration()
//
// Divides a numerator `Duration` by a denominator `Duration`, returning the
@@ -390,10 +380,7 @@ ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs,
// // Here, q would overflow int64_t, so rem accounts for the difference.
// int64_t q = absl::IDivDuration(a, b, &rem);
// // q == std::numeric_limits<int64_t>::max(), rem == a - b * q
-inline int64_t IDivDuration(Duration num, Duration den, Duration* rem) {
- return time_internal::IDivDuration(true, num, den,
- rem); // trunc towards zero
-}
+int64_t IDivDuration(Duration num, Duration den, Duration* rem);
// FDivDuration()
//
@@ -409,6 +396,30 @@ inline int64_t IDivDuration(Duration num, Duration den, Duration* rem) {
// // d == 1.5
ABSL_ATTRIBUTE_CONST_FUNCTION double FDivDuration(Duration num, Duration den);
+// Multiplicative Operators
+// Integer operands must be representable as int64_t.
+template <typename T>
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(Duration lhs, T rhs) {
+ return lhs *= rhs;
+}
+template <typename T>
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(T lhs, Duration rhs) {
+ return rhs *= lhs;
+}
+template <typename T>
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator/(Duration lhs, T rhs) {
+ return lhs /= rhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t operator/(Duration lhs,
+ Duration rhs) {
+ return IDivDuration(lhs, rhs,
+ &lhs); // trunc towards zero
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs,
+ Duration rhs) {
+ return lhs %= rhs;
+}
+
// ZeroDuration()
//
// Returns a zero-length duration. This function behaves just like the default
@@ -841,6 +852,11 @@ class Time {
private:
friend constexpr Time time_internal::FromUnixDuration(Duration d);
friend constexpr Duration time_internal::ToUnixDuration(Time t);
+
+#ifdef __cpp_lib_three_way_comparison
+ friend constexpr std::strong_ordering operator<=>(Time lhs, Time rhs);
+#endif // __cpp_lib_three_way_comparison
+
friend constexpr bool operator<(Time lhs, Time rhs);
friend constexpr bool operator==(Time lhs, Time rhs);
friend Duration operator-(Time lhs, Time rhs);
@@ -852,6 +868,15 @@ class Time {
};
// Relational Operators
+#ifdef __cpp_lib_three_way_comparison
+
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr std::strong_ordering operator<=>(
+ Time lhs, Time rhs) {
+ return lhs.rep_ <=> rhs.rep_;
+}
+
+#endif // __cpp_lib_three_way_comparison
+
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Time lhs, Time rhs) {
return lhs.rep_ < rhs.rep_;
}
@@ -1727,6 +1752,25 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs,
: time_internal::GetRepLo(lhs) < time_internal::GetRepLo(rhs);
}
+
+#ifdef __cpp_lib_three_way_comparison
+
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr std::strong_ordering operator<=>(
+ Duration lhs, Duration rhs) {
+ const int64_t lhs_hi = time_internal::GetRepHi(lhs);
+ const int64_t rhs_hi = time_internal::GetRepHi(rhs);
+ if (auto c = lhs_hi <=> rhs_hi; c != std::strong_ordering::equal) {
+ return c;
+ }
+ const uint32_t lhs_lo = time_internal::GetRepLo(lhs);
+ const uint32_t rhs_lo = time_internal::GetRepLo(rhs);
+ return (lhs_hi == (std::numeric_limits<int64_t>::min)())
+ ? (lhs_lo + 1) <=> (rhs_lo + 1)
+ : lhs_lo <=> rhs_lo;
+}
+
+#endif // __cpp_lib_three_way_comparison
+
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Duration lhs,
Duration rhs) {
return time_internal::GetRepHi(lhs) == time_internal::GetRepHi(rhs) &&
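Under C++20, where __cpp_lib_three_way_comparison is defined, the new operators let Duration and Time take part in three-way comparison; a sketch:

    #include "absl/time/time.h"

    #ifdef __cpp_lib_three_way_comparison
    static_assert((absl::Seconds(1) <=> absl::Seconds(2)) ==
                      std::strong_ordering::less,
                  "Duration now defines operator<=>");
    #endif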
diff --git a/contrib/restricted/abseil-cpp/absl/types/compare.h b/contrib/restricted/abseil-cpp/absl/types/compare.h
new file mode 100644
index 0000000000..72b7549b12
--- /dev/null
+++ b/contrib/restricted/abseil-cpp/absl/types/compare.h
@@ -0,0 +1,505 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// compare.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `absl::partial_ordering`, `absl::weak_ordering`,
+// and `absl::strong_ordering` types for storing the results of three-way
+// comparisons.
+//
+// Example:
+// absl::weak_ordering compare(const std::string& a, const std::string& b);
+//
+// These are C++11-compatible versions of the corresponding C++20 types
+// (`std::partial_ordering`, etc.) and are designed to be drop-in replacements
+// for code compliant with C++20.
+
+#ifndef ABSL_TYPES_COMPARE_H_
+#define ABSL_TYPES_COMPARE_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_USES_STD_ORDERING
+
+#include <compare> // IWYU pragma: export
+#include <type_traits>
+
+#include "absl/meta/type_traits.h"
+
+#else
+
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+#ifdef ABSL_USES_STD_ORDERING
+
+using std::partial_ordering;
+using std::strong_ordering;
+using std::weak_ordering;
+
+#else
+
+namespace compare_internal {
+
+using value_type = int8_t;
+
+class OnlyLiteralZero {
+ public:
+#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__CUDACC__)
+ // On clang, we can avoid triggering modernize-use-nullptr by only enabling
+ // this overload when the value is a compile time integer constant equal to 0.
+ //
+ // In c++20, this could be a static_assert in a consteval function.
+ constexpr OnlyLiteralZero(int n) // NOLINT
+ __attribute__((enable_if(n == 0, "Only literal `0` is allowed."))) {}
+#else // ABSL_HAVE_ATTRIBUTE(enable_if)
+ // Accept only literal zero since it can be implicitly converted to a pointer
+ // to member type. nullptr constants will be caught by the other constructor
+ // which accepts a nullptr_t.
+ //
+ // This constructor is not used for clang since it triggers
+ // modernize-use-nullptr.
+ constexpr OnlyLiteralZero(int OnlyLiteralZero::*) noexcept {} // NOLINT
+#endif
+
+ // Fails compilation when `nullptr` or integral type arguments other than
+ // `int` are passed. This constructor doesn't accept `int` because literal `0`
+ // has type `int`. Literal `0` arguments will be implicitly converted to
+ // `std::nullptr_t` and accepted by the above constructor, while other `int`
+ // arguments will fail to be converted and cause compilation failure.
+ template <typename T, typename = typename std::enable_if<
+ std::is_same<T, std::nullptr_t>::value ||
+ (std::is_integral<T>::value &&
+ !std::is_same<T, int>::value)>::type>
+ OnlyLiteralZero(T) { // NOLINT
+ static_assert(sizeof(T) < 0, "Only literal `0` is allowed.");
+ }
+};
+
+enum class eq : value_type {
+ equal = 0,
+ equivalent = equal,
+ nonequal = 1,
+ nonequivalent = nonequal,
+};
+
+enum class ord : value_type { less = -1, greater = 1 };
+
+enum class ncmp : value_type { unordered = -127 };
+
+// Define macros to create or emulate C++17 inline variables based on whether
+// the feature is supported. Note: we can't use
+// ABSL_INTERNAL_INLINE_CONSTEXPR here because the variables are of incomplete
+// types, so they need to be defined after the types are complete.
+#ifdef __cpp_inline_variables
+
+// A no-op expansion that can be followed by a semicolon at class level.
+#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) static_assert(true, "")
+
+#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) \
+ static const type name
+
+#define ABSL_COMPARE_INLINE_INIT(type, name, init) \
+ inline constexpr type type::name(init)
+
+#else // __cpp_inline_variables
+
+#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) \
+ ABSL_CONST_INIT static const T name
+
+// A no-op expansion that can be followed by a semicolon at class level.
+#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) static_assert(true, "")
+
+#define ABSL_COMPARE_INLINE_INIT(type, name, init) \
+ template <typename T> \
+ const T compare_internal::type##_base<T>::name(init)
+
+#endif // __cpp_inline_variables
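+// Illustrative expansion (editorial, not part of the header): with
+// `__cpp_inline_variables`, ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering,
+// less) declares `static const weak_ordering less;` inside the class, and
+// ABSL_COMPARE_INLINE_INIT(weak_ordering, less, compare_internal::ord::less)
+// defines it at namespace scope as
+//
+//   inline constexpr weak_ordering weak_ordering::less(
+//       compare_internal::ord::less);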
+
+// These template base classes allow the values of the constants to be defined
+// in the header file (for performance) without using inline variables (which
+// aren't available in C++11).
+template <typename T>
+struct partial_ordering_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(unordered);
+};
+
+template <typename T>
+struct weak_ordering_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
+};
+
+template <typename T>
+struct strong_ordering_base {
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equal);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
+ ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
+};
+
+} // namespace compare_internal
+
+class partial_ordering
+ : public compare_internal::partial_ordering_base<partial_ordering> {
+ explicit constexpr partial_ordering(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr partial_ordering(compare_internal::ord v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr partial_ordering(compare_internal::ncmp v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::partial_ordering_base<partial_ordering>;
+
+ constexpr bool is_ordered() const noexcept {
+ return value_ !=
+ compare_internal::value_type(compare_internal::ncmp::unordered);
+ }
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, less);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, greater);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, unordered);
+
+ // Comparisons
+ friend constexpr bool operator==(
+ partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.is_ordered() && v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return !v.is_ordered() || v.value_ != 0;
+ }
+ friend constexpr bool operator<(
+ partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.is_ordered() && v.value_ < 0;
+ }
+ friend constexpr bool operator<=(
+ partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.is_ordered() && v.value_ <= 0;
+ }
+ friend constexpr bool operator>(
+ partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.is_ordered() && v.value_ > 0;
+ }
+ friend constexpr bool operator>=(
+ partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.is_ordered() && v.value_ >= 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
+ partial_ordering v) noexcept {
+ return !v.is_ordered() || 0 != v.value_;
+ }
+ friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 < v.value_;
+ }
+ friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 <= v.value_;
+ }
+ friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 > v.value_;
+ }
+ friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
+ partial_ordering v) noexcept {
+ return v.is_ordered() && 0 >= v.value_;
+ }
+ friend constexpr bool operator==(partial_ordering v1,
+ partial_ordering v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(partial_ordering v1,
+ partial_ordering v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(partial_ordering, less, compare_internal::ord::less);
+ABSL_COMPARE_INLINE_INIT(partial_ordering, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(partial_ordering, greater,
+ compare_internal::ord::greater);
+ABSL_COMPARE_INLINE_INIT(partial_ordering, unordered,
+ compare_internal::ncmp::unordered);
+
+class weak_ordering
+ : public compare_internal::weak_ordering_base<weak_ordering> {
+ explicit constexpr weak_ordering(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr weak_ordering(compare_internal::ord v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::weak_ordering_base<weak_ordering>;
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, less);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, greater);
+
+ // Conversions
+ constexpr operator partial_ordering() const noexcept { // NOLINT
+ return value_ == 0 ? partial_ordering::equivalent
+ : (value_ < 0 ? partial_ordering::less
+ : partial_ordering::greater);
+ }
+ // Comparisons
+ friend constexpr bool operator==(
+ weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ != 0;
+ }
+ friend constexpr bool operator<(
+ weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ < 0;
+ }
+ friend constexpr bool operator<=(
+ weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ <= 0;
+ }
+ friend constexpr bool operator>(
+ weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ > 0;
+ }
+ friend constexpr bool operator>=(
+ weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ >= 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
+ weak_ordering v) noexcept {
+ return 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
+ weak_ordering v) noexcept {
+ return 0 != v.value_;
+ }
+ friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
+ weak_ordering v) noexcept {
+ return 0 < v.value_;
+ }
+ friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
+ weak_ordering v) noexcept {
+ return 0 <= v.value_;
+ }
+ friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
+ weak_ordering v) noexcept {
+ return 0 > v.value_;
+ }
+ friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
+ weak_ordering v) noexcept {
+ return 0 >= v.value_;
+ }
+ friend constexpr bool operator==(weak_ordering v1,
+ weak_ordering v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(weak_ordering v1,
+ weak_ordering v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(weak_ordering, less, compare_internal::ord::less);
+ABSL_COMPARE_INLINE_INIT(weak_ordering, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(weak_ordering, greater,
+ compare_internal::ord::greater);
+
+class strong_ordering
+ : public compare_internal::strong_ordering_base<strong_ordering> {
+ explicit constexpr strong_ordering(compare_internal::eq v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ explicit constexpr strong_ordering(compare_internal::ord v) noexcept
+ : value_(static_cast<compare_internal::value_type>(v)) {}
+ friend struct compare_internal::strong_ordering_base<strong_ordering>;
+
+ public:
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, less);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equal);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equivalent);
+ ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, greater);
+
+ // Conversions
+ constexpr operator partial_ordering() const noexcept { // NOLINT
+ return value_ == 0 ? partial_ordering::equivalent
+ : (value_ < 0 ? partial_ordering::less
+ : partial_ordering::greater);
+ }
+ constexpr operator weak_ordering() const noexcept { // NOLINT
+ return value_ == 0
+ ? weak_ordering::equivalent
+ : (value_ < 0 ? weak_ordering::less : weak_ordering::greater);
+ }
+ // Comparisons
+ friend constexpr bool operator==(
+ strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ == 0;
+ }
+ friend constexpr bool operator!=(
+ strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ != 0;
+ }
+ friend constexpr bool operator<(
+ strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ < 0;
+ }
+ friend constexpr bool operator<=(
+ strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ <= 0;
+ }
+ friend constexpr bool operator>(
+ strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ > 0;
+ }
+ friend constexpr bool operator>=(
+ strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
+ return v.value_ >= 0;
+ }
+ friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
+ strong_ordering v) noexcept {
+ return 0 == v.value_;
+ }
+ friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
+ strong_ordering v) noexcept {
+ return 0 != v.value_;
+ }
+ friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
+ strong_ordering v) noexcept {
+ return 0 < v.value_;
+ }
+ friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
+ strong_ordering v) noexcept {
+ return 0 <= v.value_;
+ }
+ friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
+ strong_ordering v) noexcept {
+ return 0 > v.value_;
+ }
+ friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
+ strong_ordering v) noexcept {
+ return 0 >= v.value_;
+ }
+ friend constexpr bool operator==(strong_ordering v1,
+ strong_ordering v2) noexcept {
+ return v1.value_ == v2.value_;
+ }
+ friend constexpr bool operator!=(strong_ordering v1,
+ strong_ordering v2) noexcept {
+ return v1.value_ != v2.value_;
+ }
+
+ private:
+ compare_internal::value_type value_;
+};
+ABSL_COMPARE_INLINE_INIT(strong_ordering, less, compare_internal::ord::less);
+ABSL_COMPARE_INLINE_INIT(strong_ordering, equal, compare_internal::eq::equal);
+ABSL_COMPARE_INLINE_INIT(strong_ordering, equivalent,
+ compare_internal::eq::equivalent);
+ABSL_COMPARE_INLINE_INIT(strong_ordering, greater,
+ compare_internal::ord::greater);
+
+#undef ABSL_COMPARE_INLINE_BASECLASS_DECL
+#undef ABSL_COMPARE_INLINE_SUBCLASS_DECL
+#undef ABSL_COMPARE_INLINE_INIT
+
+#endif // ABSL_USES_STD_ORDERING
+
+namespace compare_internal {
+// We also provide these comparator adapter functions for internal absl use.
+
+// Helper functions to do a boolean comparison of two keys given a boolean
+// or three-way comparator.
+// SFINAE prevents implicit conversions to bool (such as from int).
+template <typename BoolT,
+ absl::enable_if_t<std::is_same<bool, BoolT>::value, int> = 0>
+constexpr bool compare_result_as_less_than(const BoolT r) { return r; }
+constexpr bool compare_result_as_less_than(const absl::weak_ordering r) {
+ return r < 0;
+}
+
+template <typename Compare, typename K, typename LK>
+constexpr bool do_less_than_comparison(const Compare &compare, const K &x,
+ const LK &y) {
+ return compare_result_as_less_than(compare(x, y));
+}
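+
+// Illustrative usage (an editorial sketch, not upstream code): both comparator
+// shapes funnel through the same helper.
+//
+//   std::less<int> boolean_cmp;
+//   auto three_way_cmp = [](int a, int b) {
+//     return a < b    ? absl::weak_ordering::less
+//            : a == b ? absl::weak_ordering::equivalent
+//                     : absl::weak_ordering::greater;
+//   };
+//   compare_internal::do_less_than_comparison(boolean_cmp, 1, 2);    // true
+//   compare_internal::do_less_than_comparison(three_way_cmp, 1, 2);  // true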
+
+// Helper functions to do a three-way comparison of two keys given a boolean or
+// three-way comparator.
+// SFINAE prevents implicit conversions to int (such as from bool).
+template <typename Int,
+ absl::enable_if_t<std::is_same<int, Int>::value, int> = 0>
+constexpr absl::weak_ordering compare_result_as_ordering(const Int c) {
+ return c < 0 ? absl::weak_ordering::less
+ : c == 0 ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+}
+constexpr absl::weak_ordering compare_result_as_ordering(
+ const absl::weak_ordering c) {
+ return c;
+}
+
+template <
+ typename Compare, typename K, typename LK,
+ absl::enable_if_t<!std::is_same<bool, absl::result_of_t<Compare(
+ const K &, const LK &)>>::value,
+ int> = 0>
+constexpr absl::weak_ordering do_three_way_comparison(const Compare &compare,
+ const K &x, const LK &y) {
+ return compare_result_as_ordering(compare(x, y));
+}
+template <
+ typename Compare, typename K, typename LK,
+ absl::enable_if_t<std::is_same<bool, absl::result_of_t<Compare(
+ const K &, const LK &)>>::value,
+ int> = 0>
+constexpr absl::weak_ordering do_three_way_comparison(const Compare &compare,
+ const K &x, const LK &y) {
+ return compare(x, y) ? absl::weak_ordering::less
+ : compare(y, x) ? absl::weak_ordering::greater
+ : absl::weak_ordering::equivalent;
+}
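+
+// Illustrative behavior (editorial, not upstream code): for a boolean
+// comparator the overload above probes both directions, at the cost of up to
+// two comparator invocations per call:
+//
+//   do_three_way_comparison(std::less<int>(), 1, 2);  // weak_ordering::less
+//   do_three_way_comparison(std::less<int>(), 2, 1);  // weak_ordering::greater
+//   do_three_way_comparison(std::less<int>(), 1, 1);  // weak_ordering::equivalent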
+
+} // namespace compare_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_TYPES_COMPARE_H_
diff --git a/contrib/restricted/abseil-cpp/absl/types/internal/optional.h b/contrib/restricted/abseil-cpp/absl/types/internal/optional.h
index a96d260a43..5731a5bcc9 100644
--- a/contrib/restricted/abseil-cpp/absl/types/internal/optional.h
+++ b/contrib/restricted/abseil-cpp/absl/types/internal/optional.h
@@ -81,7 +81,7 @@ class optional_data_dtor_base {
template <typename... Args>
constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
- : engaged_(true), data_(absl::forward<Args>(args)...) {}
+ : engaged_(true), data_(std::forward<Args>(args)...) {}
~optional_data_dtor_base() { destruct(); }
};
@@ -110,7 +110,7 @@ class optional_data_dtor_base<T, true> {
template <typename... Args>
constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
- : engaged_(true), data_(absl::forward<Args>(args)...) {}
+ : engaged_(true), data_(std::forward<Args>(args)...) {}
};
template <typename T>
diff --git a/contrib/restricted/abseil-cpp/absl/types/internal/variant.h b/contrib/restricted/abseil-cpp/absl/types/internal/variant.h
index 263d7b0947..4cb15f2921 100644
--- a/contrib/restricted/abseil-cpp/absl/types/internal/variant.h
+++ b/contrib/restricted/abseil-cpp/absl/types/internal/variant.h
@@ -26,6 +26,7 @@
#include <stdexcept>
#include <tuple>
#include <type_traits>
+#include <utility>
#include "absl/base/config.h"
#include "absl/base/internal/identity.h"
@@ -214,7 +215,7 @@ constexpr ReturnType call_with_indices(FunctionObject&& function) {
std::is_same<ReturnType, decltype(std::declval<FunctionObject>()(
SizeT<Indices>()...))>::value,
"Not all visitation overloads have the same return type.");
- return absl::forward<FunctionObject>(function)(SizeT<Indices>()...);
+ return std::forward<FunctionObject>(function)(SizeT<Indices>()...);
}
template <class ReturnType, class FunctionObject, std::size_t... BoundIndices>
@@ -272,27 +273,14 @@ struct UnreachableSwitchCase {
template <class Op>
[[noreturn]] static VisitIndicesResultT<Op, std::size_t> Run(
Op&& /*ignored*/) {
-#if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \
- (defined(__GNUC__) && !defined(__clang__))
- __builtin_unreachable();
-#elif defined(_MSC_VER)
- __assume(false);
-#else
- // Try to use assert of false being identified as an unreachable intrinsic.
- // NOTE: We use assert directly to increase chances of exploiting an assume
- // intrinsic.
- assert(false); // NOLINT
-
- // Hack to silence potential no return warning -- cause an infinite loop.
- return Run(absl::forward<Op>(op));
-#endif // Checks for __builtin_unreachable
+ ABSL_UNREACHABLE();
}
};
template <class Op, std::size_t I>
struct ReachableSwitchCase {
static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
- return absl::base_internal::invoke(absl::forward<Op>(op), SizeT<I>());
+ return absl::base_internal::invoke(std::forward<Op>(op), SizeT<I>());
}
};
@@ -357,74 +345,74 @@ struct VisitIndicesSwitch {
static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
switch (i) {
case 0:
- return PickCase<Op, 0, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 0, EndIndex>::Run(std::forward<Op>(op));
case 1:
- return PickCase<Op, 1, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 1, EndIndex>::Run(std::forward<Op>(op));
case 2:
- return PickCase<Op, 2, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 2, EndIndex>::Run(std::forward<Op>(op));
case 3:
- return PickCase<Op, 3, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 3, EndIndex>::Run(std::forward<Op>(op));
case 4:
- return PickCase<Op, 4, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 4, EndIndex>::Run(std::forward<Op>(op));
case 5:
- return PickCase<Op, 5, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 5, EndIndex>::Run(std::forward<Op>(op));
case 6:
- return PickCase<Op, 6, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 6, EndIndex>::Run(std::forward<Op>(op));
case 7:
- return PickCase<Op, 7, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 7, EndIndex>::Run(std::forward<Op>(op));
case 8:
- return PickCase<Op, 8, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 8, EndIndex>::Run(std::forward<Op>(op));
case 9:
- return PickCase<Op, 9, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 9, EndIndex>::Run(std::forward<Op>(op));
case 10:
- return PickCase<Op, 10, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 10, EndIndex>::Run(std::forward<Op>(op));
case 11:
- return PickCase<Op, 11, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 11, EndIndex>::Run(std::forward<Op>(op));
case 12:
- return PickCase<Op, 12, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 12, EndIndex>::Run(std::forward<Op>(op));
case 13:
- return PickCase<Op, 13, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 13, EndIndex>::Run(std::forward<Op>(op));
case 14:
- return PickCase<Op, 14, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 14, EndIndex>::Run(std::forward<Op>(op));
case 15:
- return PickCase<Op, 15, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 15, EndIndex>::Run(std::forward<Op>(op));
case 16:
- return PickCase<Op, 16, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 16, EndIndex>::Run(std::forward<Op>(op));
case 17:
- return PickCase<Op, 17, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 17, EndIndex>::Run(std::forward<Op>(op));
case 18:
- return PickCase<Op, 18, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 18, EndIndex>::Run(std::forward<Op>(op));
case 19:
- return PickCase<Op, 19, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 19, EndIndex>::Run(std::forward<Op>(op));
case 20:
- return PickCase<Op, 20, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 20, EndIndex>::Run(std::forward<Op>(op));
case 21:
- return PickCase<Op, 21, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 21, EndIndex>::Run(std::forward<Op>(op));
case 22:
- return PickCase<Op, 22, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 22, EndIndex>::Run(std::forward<Op>(op));
case 23:
- return PickCase<Op, 23, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 23, EndIndex>::Run(std::forward<Op>(op));
case 24:
- return PickCase<Op, 24, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 24, EndIndex>::Run(std::forward<Op>(op));
case 25:
- return PickCase<Op, 25, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 25, EndIndex>::Run(std::forward<Op>(op));
case 26:
- return PickCase<Op, 26, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 26, EndIndex>::Run(std::forward<Op>(op));
case 27:
- return PickCase<Op, 27, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 27, EndIndex>::Run(std::forward<Op>(op));
case 28:
- return PickCase<Op, 28, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 28, EndIndex>::Run(std::forward<Op>(op));
case 29:
- return PickCase<Op, 29, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 29, EndIndex>::Run(std::forward<Op>(op));
case 30:
- return PickCase<Op, 30, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 30, EndIndex>::Run(std::forward<Op>(op));
case 31:
- return PickCase<Op, 31, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 31, EndIndex>::Run(std::forward<Op>(op));
case 32:
- return PickCase<Op, 32, EndIndex>::Run(absl::forward<Op>(op));
+ return PickCase<Op, 32, EndIndex>::Run(std::forward<Op>(op));
default:
ABSL_ASSERT(i == variant_npos);
- return absl::base_internal::invoke(absl::forward<Op>(op), NPos());
+ return absl::base_internal::invoke(std::forward<Op>(op), NPos());
}
}
};
@@ -437,7 +425,7 @@ struct VisitIndicesFallback {
MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
index_sequence<(EndIndices + 1)...>,
index_sequence<>>::Run(),
- (indices + 1)...)(absl::forward<Op>(op));
+ (indices + 1)...)(std::forward<Op>(op));
}
};
@@ -489,7 +477,7 @@ struct VisitIndicesVariadicImpl<absl::index_sequence<N...>, EndIndices...> {
VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
SizeT<I> /*index*/) && {
return base_internal::invoke(
- absl::forward<Op>(op),
+ std::forward<Op>(op),
SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
std::size_t{1}>()...);
}
@@ -501,7 +489,7 @@ struct VisitIndicesVariadicImpl<absl::index_sequence<N...>, EndIndices...> {
static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(Op&& op,
SizeType... i) {
return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
- FlattenedOp<Op>{absl::forward<Op>(op)},
+ FlattenedOp<Op>{std::forward<Op>(op)},
FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
(i + std::size_t{1})...));
}
@@ -612,7 +600,7 @@ struct VariantCoreAccess {
TypedThrowBadVariantAccess<VariantAccessResult<I, Variant>>();
}
- return Access<I>(absl::forward<Variant>(self));
+ return Access<I>(std::forward<Variant>(self));
}
// The implementation of the move-assignment operation for a variant.
@@ -684,7 +672,7 @@ struct VariantCoreAccess {
void operator()(SizeT<NewIndex::value> /*old_i*/
) const {
- Access<NewIndex::value>(*left) = absl::forward<QualifiedNew>(other);
+ Access<NewIndex::value>(*left) = std::forward<QualifiedNew>(other);
}
template <std::size_t OldIndex>
@@ -695,13 +683,13 @@ struct VariantCoreAccess {
if (std::is_nothrow_constructible<New, QualifiedNew>::value ||
!std::is_nothrow_move_constructible<New>::value) {
left->template emplace<NewIndex::value>(
- absl::forward<QualifiedNew>(other));
+ std::forward<QualifiedNew>(other));
} else {
// the standard says "equivalent to
// operator=(variant(std::forward<T>(t)))", but we use `emplace` here
// because the variant's move assignment operator could be deleted.
left->template emplace<NewIndex::value>(
- New(absl::forward<QualifiedNew>(other)));
+ New(std::forward<QualifiedNew>(other)));
}
}
@@ -712,7 +700,7 @@ struct VariantCoreAccess {
template <class Left, class QualifiedNew>
static ConversionAssignVisitor<Left, QualifiedNew>
MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) {
- return {left, absl::forward<QualifiedNew>(qual)};
+ return {left, std::forward<QualifiedNew>(qual)};
}
// Backend for operations for `emplace()` which destructs `*self` then
@@ -723,7 +711,7 @@ struct VariantCoreAccess {
Destroy(*self);
using New = typename absl::variant_alternative<NewIndex, Self>::type;
New* const result = ::new (static_cast<void*>(&self->state_))
- New(absl::forward<Args>(args)...);
+ New(std::forward<Args>(args)...);
self->index_ = NewIndex;
return *result;
}
@@ -919,9 +907,9 @@ struct PerformVisitation {
Is, QualifiedVariants>...)>>::value,
"All visitation overloads must have the same return type.");
return absl::base_internal::invoke(
- absl::forward<Op>(op),
+ std::forward<Op>(op),
VariantCoreAccess::Access<Is>(
- absl::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
+ std::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
}
template <std::size_t... TupIs, std::size_t... Is>
@@ -969,11 +957,11 @@ union Union<Head, Tail...> {
template <class... P>
explicit constexpr Union(EmplaceTag<0>, P&&... args)
- : head(absl::forward<P>(args)...) {}
+ : head(std::forward<P>(args)...) {}
template <std::size_t I, class... P>
explicit constexpr Union(EmplaceTag<I>, P&&... args)
- : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {}
+ : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
Head head;
TailUnion tail;
@@ -1001,11 +989,11 @@ union DestructibleUnionImpl<Head, Tail...> {
template <class... P>
explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args)
- : head(absl::forward<P>(args)...) {}
+ : head(std::forward<P>(args)...) {}
template <std::size_t I, class... P>
explicit constexpr DestructibleUnionImpl(EmplaceTag<I>, P&&... args)
- : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {}
+ : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
~DestructibleUnionImpl() {}
@@ -1036,7 +1024,7 @@ class VariantStateBase {
template <std::size_t I, class... P>
explicit constexpr VariantStateBase(EmplaceTag<I> tag, P&&... args)
- : state_(tag, absl::forward<P>(args)...), index_(I) {}
+ : state_(tag, std::forward<P>(args)...), index_(I) {}
explicit constexpr VariantStateBase(NoopConstructorTag)
: state_(NoopConstructorTag()), index_(variant_npos) {}
@@ -1321,7 +1309,7 @@ class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor<T...> {
using Alternative =
typename absl::variant_alternative<I, variant<T...>>::type;
::new (static_cast<void*>(&self->state_)) Alternative(
- variant_internal::AccessUnion(absl::move(other->state_), i));
+ variant_internal::AccessUnion(std::move(other->state_), i));
}
void operator()(SizeT<absl::variant_npos> /*i*/) const {}
diff --git a/contrib/restricted/abseil-cpp/absl/types/optional.h b/contrib/restricted/abseil-cpp/absl/types/optional.h
index 395fe62f29..cf7249cbe3 100644
--- a/contrib/restricted/abseil-cpp/absl/types/optional.h
+++ b/contrib/restricted/abseil-cpp/absl/types/optional.h
@@ -151,7 +151,7 @@ class optional : private optional_internal::optional_data<T>,
std::is_same<InPlaceT, in_place_t>,
std::is_constructible<T, Args&&...> >::value>* = nullptr>
constexpr explicit optional(InPlaceT, Args&&... args)
- : data_base(in_place_t(), absl::forward<Args>(args)...) {}
+ : data_base(in_place_t(), std::forward<Args>(args)...) {}
// Constructs a non-empty `optional` direct-initialized value of type `T` from
// the arguments of an initializer_list and `std::forward<Args>(args)...`.
@@ -162,8 +162,7 @@ class optional : private optional_internal::optional_data<T>,
T, std::initializer_list<U>&, Args&&...>::value>::type>
constexpr explicit optional(in_place_t, std::initializer_list<U> il,
Args&&... args)
- : data_base(in_place_t(), il, absl::forward<Args>(args)...) {
- }
+ : data_base(in_place_t(), il, std::forward<Args>(args)...) {}
// Value constructor (implicit)
template <
@@ -176,21 +175,21 @@ class optional : private optional_internal::optional_data<T>,
std::is_convertible<U&&, T>,
std::is_constructible<T, U&&> >::value,
bool>::type = false>
- constexpr optional(U&& v) : data_base(in_place_t(), absl::forward<U>(v)) {}
+ constexpr optional(U&& v) : data_base(in_place_t(), std::forward<U>(v)) {}
// Value constructor (explicit)
template <
typename U = T,
typename std::enable_if<
absl::conjunction<absl::negation<std::is_same<
- in_place_t, typename std::decay<U>::type>>,
+ in_place_t, typename std::decay<U>::type> >,
absl::negation<std::is_same<
- optional<T>, typename std::decay<U>::type>>,
- absl::negation<std::is_convertible<U&&, T>>,
- std::is_constructible<T, U&&>>::value,
+ optional<T>, typename std::decay<U>::type> >,
+ absl::negation<std::is_convertible<U&&, T> >,
+ std::is_constructible<T, U&&> >::value,
bool>::type = false>
explicit constexpr optional(U&& v)
- : data_base(in_place_t(), absl::forward<U>(v)) {}
+ : data_base(in_place_t(), std::forward<U>(v)) {}
// Converting copy constructor (implicit)
template <typename U,
@@ -437,7 +436,7 @@ class optional : private optional_internal::optional_data<T>,
return reference();
}
constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference());
+ return ABSL_HARDENING_ASSERT(this->engaged_), std::move(reference());
}
T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(this->engaged_);
@@ -492,7 +491,7 @@ class optional : private optional_internal::optional_data<T>,
}
constexpr const T&& value()
const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
- return absl::move(
+ return std::move(
static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference()));
@@ -511,9 +510,8 @@ class optional : private optional_internal::optional_data<T>,
"optional<T>::value_or: T must be copy constructible");
static_assert(std::is_convertible<U&&, value_type>::value,
"optional<T>::value_or: U must be convertible to T");
- return static_cast<bool>(*this)
- ? **this
- : static_cast<T>(absl::forward<U>(v));
+ return static_cast<bool>(*this) ? **this
+ : static_cast<T>(std::forward<U>(v));
}
template <typename U>
T value_or(U&& v) && { // NOLINT(build/c++11)
@@ -573,19 +571,18 @@ void swap(optional<T>& a, optional<T>& b) noexcept(noexcept(a.swap(b))) {
// static_assert(opt.value() == 1, "");
template <typename T>
constexpr optional<typename std::decay<T>::type> make_optional(T&& v) {
- return optional<typename std::decay<T>::type>(absl::forward<T>(v));
+ return optional<typename std::decay<T>::type>(std::forward<T>(v));
}
template <typename T, typename... Args>
constexpr optional<T> make_optional(Args&&... args) {
- return optional<T>(in_place_t(), absl::forward<Args>(args)...);
+ return optional<T>(in_place_t(), std::forward<Args>(args)...);
}
template <typename T, typename U, typename... Args>
constexpr optional<T> make_optional(std::initializer_list<U> il,
Args&&... args) {
- return optional<T>(in_place_t(), il,
- absl::forward<Args>(args)...);
+ return optional<T>(in_place_t(), il, std::forward<Args>(args)...);
}
// Relational operators [optional.relops]
diff --git a/contrib/restricted/abseil-cpp/absl/types/span.h b/contrib/restricted/abseil-cpp/absl/types/span.h
index 88cd759540..a0f80272e9 100644
--- a/contrib/restricted/abseil-cpp/absl/types/span.h
+++ b/contrib/restricted/abseil-cpp/absl/types/span.h
@@ -43,7 +43,7 @@
// * A read-only `absl::Span<const T>` can be implicitly constructed from an
// initializer list.
// * `absl::Span` has no `bytes()`, `size_bytes()`, `as_bytes()`, or
-// `as_mutable_bytes()` methods
+// `as_writable_bytes()` methods
// * `absl::Span` has no static extent template parameter, nor constructors
// which exist only because of the static extent parameter.
// * `absl::Span` has an explicit mutable-reference constructor
@@ -151,7 +151,7 @@ ABSL_NAMESPACE_BEGIN
// int* my_array = new int[10];
// MyRoutine(absl::Span<const int>(my_array, 10));
template <typename T>
-class Span {
+class ABSL_INTERNAL_ATTRIBUTE_VIEW Span {
private:
// Used to determine whether a Span can be constructed from a container of
// type C.
@@ -185,6 +185,7 @@ class Span {
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
using size_type = size_t;
using difference_type = ptrdiff_t;
+ using absl_internal_is_view = std::true_type;
static const size_type npos = ~(size_type(0));
diff --git a/contrib/restricted/abseil-cpp/absl/types/variant.h b/contrib/restricted/abseil-cpp/absl/types/variant.h
index ac93464bf8..56a7e05ee6 100644
--- a/contrib/restricted/abseil-cpp/absl/types/variant.h
+++ b/contrib/restricted/abseil-cpp/absl/types/variant.h
@@ -303,11 +303,10 @@ constexpr T& get(variant<Types...>& v) { // NOLINT
}
// Overload for getting a variant's rvalue by type.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
template <class T, class... Types>
constexpr T&& get(variant<Types...>&& v) {
return variant_internal::VariantCoreAccess::CheckedAccess<
- variant_internal::IndexOf<T, Types...>::value>(absl::move(v));
+ variant_internal::IndexOf<T, Types...>::value>(std::move(v));
}
// Overload for getting a variant's const lvalue by type.
@@ -318,11 +317,10 @@ constexpr const T& get(const variant<Types...>& v) {
}
// Overload for getting a variant's const rvalue by type.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
template <class T, class... Types>
constexpr const T&& get(const variant<Types...>&& v) {
return variant_internal::VariantCoreAccess::CheckedAccess<
- variant_internal::IndexOf<T, Types...>::value>(absl::move(v));
+ variant_internal::IndexOf<T, Types...>::value>(std::move(v));
}
// Overload for getting a variant's lvalue by index.
@@ -333,11 +331,10 @@ constexpr variant_alternative_t<I, variant<Types...>>& get(
}
// Overload for getting a variant's rvalue by index.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
template <std::size_t I, class... Types>
constexpr variant_alternative_t<I, variant<Types...>>&& get(
variant<Types...>&& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<I>(absl::move(v));
+ return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
}
// Overload for getting a variant's const lvalue by index.
@@ -348,11 +345,10 @@ constexpr const variant_alternative_t<I, variant<Types...>>& get(
}
// Overload for getting a variant's const rvalue by index.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
template <std::size_t I, class... Types>
constexpr const variant_alternative_t<I, variant<Types...>>&& get(
const variant<Types...>&& v) {
- return variant_internal::VariantCoreAccess::CheckedAccess<I>(absl::move(v));
+ return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
}
// get_if()
@@ -432,8 +428,8 @@ variant_internal::VisitResult<Visitor, Variants...> visit(Visitor&& vis,
return variant_internal::
VisitIndices<variant_size<absl::decay_t<Variants> >::value...>::Run(
variant_internal::PerformVisitation<Visitor, Variants...>{
- std::forward_as_tuple(absl::forward<Variants>(vars)...),
- absl::forward<Visitor>(vis)},
+ std::forward_as_tuple(std::forward<Variants>(vars)...),
+ std::forward<Visitor>(vis)},
vars.index()...);
}
@@ -504,13 +500,12 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
class T,
std::size_t I = std::enable_if<
variant_internal::IsNeitherSelfNorInPlace<variant,
- absl::decay_t<T>>::value,
- variant_internal::IndexOfConstructedType<variant, T>>::type::value,
+ absl::decay_t<T> >::value,
+ variant_internal::IndexOfConstructedType<variant, T> >::type::value,
class Tj = absl::variant_alternative_t<I, variant>,
- absl::enable_if_t<std::is_constructible<Tj, T>::value>* =
- nullptr>
+ absl::enable_if_t<std::is_constructible<Tj, T>::value>* = nullptr>
constexpr variant(T&& t) noexcept(std::is_nothrow_constructible<Tj, T>::value)
- : Base(variant_internal::EmplaceTag<I>(), absl::forward<T>(t)) {}
+ : Base(variant_internal::EmplaceTag<I>(), std::forward<T>(t)) {}
// Constructs a variant of an alternative type from the arguments through
// direct-initialization.
@@ -524,7 +519,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
constexpr explicit variant(in_place_type_t<T>, Args&&... args)
: Base(variant_internal::EmplaceTag<
variant_internal::UnambiguousIndexOf<variant, T>::value>(),
- absl::forward<Args>(args)...) {}
+ std::forward<Args>(args)...) {}
// Constructs a variant of an alternative type from an initializer list
// and other arguments through direct-initialization.
@@ -539,7 +534,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
Args&&... args)
: Base(variant_internal::EmplaceTag<
variant_internal::UnambiguousIndexOf<variant, T>::value>(),
- il, absl::forward<Args>(args)...) {}
+ il, std::forward<Args>(args)...) {}
// Constructs a variant of an alternative type from a provided index,
// through value-initialization using the provided forwarded arguments.
@@ -548,7 +543,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
variant_internal::VariantAlternativeSfinaeT<I, variant>,
Args...>::value>::type* = nullptr>
constexpr explicit variant(in_place_index_t<I>, Args&&... args)
- : Base(variant_internal::EmplaceTag<I>(), absl::forward<Args>(args)...) {}
+ : Base(variant_internal::EmplaceTag<I>(), std::forward<Args>(args)...) {}
// Constructs a variant of an alternative type from a provided index,
// through value-initialization of an initializer list and the provided
@@ -560,7 +555,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
constexpr explicit variant(in_place_index_t<I>, std::initializer_list<U> il,
Args&&... args)
: Base(variant_internal::EmplaceTag<I>(), il,
- absl::forward<Args>(args)...) {}
+ std::forward<Args>(args)...) {}
// Destructors
@@ -595,7 +590,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
std::is_nothrow_constructible<Tj, T>::value) {
variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
- this, absl::forward<T>(t)),
+ this, std::forward<T>(t)),
index());
return *this;
@@ -623,7 +618,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
T& emplace(Args&&... args) {
return variant_internal::VariantCoreAccess::Replace<
variant_internal::UnambiguousIndexOf<variant, T>::value>(
- this, absl::forward<Args>(args)...);
+ this, std::forward<Args>(args)...);
}
// Constructs a value of the given alternative type T within the variant using
@@ -644,7 +639,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
T& emplace(std::initializer_list<U> il, Args&&... args) {
return variant_internal::VariantCoreAccess::Replace<
variant_internal::UnambiguousIndexOf<variant, T>::value>(
- this, il, absl::forward<Args>(args)...);
+ this, il, std::forward<Args>(args)...);
}
// Destroys the current value of the variant (provided that
@@ -663,7 +658,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
Args...>::value>::type* = nullptr>
absl::variant_alternative_t<I, variant>& emplace(Args&&... args) {
return variant_internal::VariantCoreAccess::Replace<I>(
- this, absl::forward<Args>(args)...);
+ this, std::forward<Args>(args)...);
}
// Destroys the current value of the variant (provided that
@@ -681,7 +676,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
absl::variant_alternative_t<I, variant>& emplace(std::initializer_list<U> il,
Args&&... args) {
return variant_internal::VariantCoreAccess::Replace<I>(
- this, il, absl::forward<Args>(args)...);
+ this, il, std::forward<Args>(args)...);
}
// variant::valueless_by_exception()
diff --git a/contrib/restricted/abseil-cpp/absl/utility/utility.h b/contrib/restricted/abseil-cpp/absl/utility/utility.h
index fc0d1f6551..ebbb49b715 100644
--- a/contrib/restricted/abseil-cpp/absl/utility/utility.h
+++ b/contrib/restricted/abseil-cpp/absl/utility/utility.h
@@ -51,11 +51,14 @@ ABSL_NAMESPACE_BEGIN
// abstractions for platforms that had not yet provided them. Those
// platforms are no longer supported. New code should simply use the
// ones from std directly.
+using std::exchange;
+using std::forward;
using std::index_sequence;
using std::index_sequence_for;
using std::integer_sequence;
using std::make_index_sequence;
using std::make_integer_sequence;
+using std::move;
namespace utility_internal {
@@ -129,27 +132,6 @@ template <size_t I>
void in_place_index(utility_internal::InPlaceIndexTag<I>) {}
#endif // ABSL_USES_STD_VARIANT
-// Constexpr move and forward
-
-// move()
-//
-// A constexpr version of `std::move()`, designed to be a drop-in replacement
-// for C++14's `std::move()`.
-template <typename T>
-constexpr absl::remove_reference_t<T>&& move(T&& t) noexcept {
- return static_cast<absl::remove_reference_t<T>&&>(t);
-}
-
-// forward()
-//
-// A constexpr version of `std::forward()`, designed to be a drop-in replacement
-// for C++14's `std::forward()`.
-template <typename T>
-constexpr T&& forward(
- absl::remove_reference_t<T>& t) noexcept { // NOLINT(runtime/references)
- return static_cast<T&&>(t);
-}
-
namespace utility_internal {
// Helper method for expanding tuple into a called method.
template <typename Functor, typename Tuple, std::size_t... Indexes>
@@ -215,26 +197,6 @@ auto apply(Functor&& functor, Tuple&& t)
typename std::remove_reference<Tuple>::type>::value>{});
}
-// exchange
-//
-// Replaces the value of `obj` with `new_value` and returns the old value of
-// `obj`. `absl::exchange` is designed to be a drop-in replacement for C++14's
-// `std::exchange`.
-//
-// Example:
-//
-// Foo& operator=(Foo&& other) {
-// ptr1_ = absl::exchange(other.ptr1_, nullptr);
-// int1_ = absl::exchange(other.int1_, -1);
-// return *this;
-// }
-template <typename T, typename U = T>
-T exchange(T& obj, U&& new_value) {
- T old_value = absl::move(obj);
- obj = absl::forward<U>(new_value);
- return old_value;
-}
-
namespace utility_internal {
template <typename T, typename Tuple, size_t... I>
T make_from_tuple_impl(Tuple&& tup, absl::index_sequence<I...>) {
diff --git a/contrib/restricted/abseil-cpp/absl/utility/ya.make b/contrib/restricted/abseil-cpp/absl/utility/ya.make
index ac5695f072..467248d279 100644
--- a/contrib/restricted/abseil-cpp/absl/utility/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/utility/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20240116.2)
+VERSION(20240722.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240116.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240722.0.tar.gz)
NO_RUNTIME()
diff --git a/contrib/restricted/abseil-cpp/ya.make b/contrib/restricted/abseil-cpp/ya.make
index 384f80ac5f..a9c4b4ba72 100644
--- a/contrib/restricted/abseil-cpp/ya.make
+++ b/contrib/restricted/abseil-cpp/ya.make
@@ -1,4 +1,4 @@
-# Generated by devtools/yamaker from nixpkgs 22.11.
+# Generated by devtools/yamaker from nixpkgs 24.05.
LIBRARY()
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20240116.2)
+VERSION(20240722.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240116.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20240722.0.tar.gz)
PEERDIR(
contrib/restricted/abseil-cpp/absl/algorithm