author     thegeorg <thegeorg@yandex-team.com>  2022-08-31 10:58:13 +0300
committer  thegeorg <thegeorg@yandex-team.com>  2022-08-31 10:58:13 +0300
commit     db58955e8e90ba21f2d9b82fa9af131f40ff1c6f (patch)
tree       09c50f24532f69bc07d3ff991cf581c76ef5482a
parent     665b832313a6f533823ec2187458f2b33a70b093 (diff)
download   ydb-db58955e8e90ba21f2d9b82fa9af131f40ff1c6f.tar.gz
Update contrib/restricted/abseil-cpp-tstring to 20220623.0
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h | 71
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h | 129
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h | 223
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc | 54
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h | 69
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h | 79
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h | 54
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h | 138
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc | 51
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h | 25
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc | 13
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h | 15
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h | 51
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h | 30
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h | 15
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h | 29
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h | 38
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc | 88
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h | 40
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h | 50
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h | 97
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h | 699
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/CMakeLists.txt | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc | 133
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc | 222
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h | 34
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc | 38
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc | 17
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc | 44
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h | 19
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc | 53
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/config.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/declare.h | 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/flag.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h | 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h | 94
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h | 90
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h | 241
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h | 27
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h | 17
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc | 176
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h | 34
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/CMakeLists.txt | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc | 1107
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h | 283
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc | 188
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h | 44
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.cc (renamed from contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc) | 26
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h | 572
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_data_edge.h | 63
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc | 52
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h | 211
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc | 198
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h | 103
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc | 81
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.cc | 54
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.h | 102
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h | 75
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc | 24
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc | 49
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h | 45
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h | 15
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc | 16
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h | 49
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h | 33
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h | 24
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc | 15
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h | 14
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc | 15
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc | 25
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h | 14
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc | 115
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h | 26
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h | 31
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h | 3
135 files changed, 4826 insertions, 2605 deletions
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
index 3abb26f1b3..b34b4f498b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
@@ -166,7 +166,7 @@ container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
// c_all_of()
//
// Container-based version of the <algorithm> `std::all_of()` function to
-// test a condition on all elements within a container.
+// test if all elements within a container satisfy a condition.
template <typename C, typename Pred>
bool c_all_of(const C& c, Pred&& pred) {
return std::all_of(container_algorithm_internal::c_begin(c),
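A minimal usage sketch of the c_all_of() wrapper documented in this hunk (not part of the patch; it assumes only the y_absl::c_all_of API shown above):

#include <vector>
#include "y_absl/algorithm/container.h"

bool AllPositive(const std::vector<int>& v) {
  // Same as std::all_of(v.begin(), v.end(), pred), but takes the container.
  return y_absl::c_all_of(v, [](int x) { return x > 0; });
}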
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
index 66751eaddf..21e64f43b3 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
@@ -136,9 +136,10 @@
// for further information.
// The MinGW compiler doesn't complain about the weak attribute until the link
// step, presumably because Windows doesn't use ELF binaries.
-#if (Y_ABSL_HAVE_ATTRIBUTE(weak) || \
- (defined(__GNUC__) && !defined(__clang__))) && \
- (!defined(_WIN32) || __clang_major__ < 9) && !defined(__MINGW32__)
+#if (Y_ABSL_HAVE_ATTRIBUTE(weak) || \
+ (defined(__GNUC__) && !defined(__clang__))) && \
+ (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
+ !defined(__MINGW32__)
#undef Y_ABSL_ATTRIBUTE_WEAK
#define Y_ABSL_ATTRIBUTE_WEAK __attribute__((weak))
#define Y_ABSL_HAVE_ATTRIBUTE_WEAK 1
@@ -212,6 +213,9 @@
// https://gcc.gnu.org/gcc-4.8/changes.html
#if Y_ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#elif defined(_MSC_VER) && _MSC_VER >= 1928
+// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
+#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
#else
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
#endif
@@ -311,7 +315,6 @@
__attribute__((section(#name))) __attribute__((noinline))
#endif
-
// Y_ABSL_ATTRIBUTE_SECTION_VARIABLE
//
// Tells the compiler/linker to put a given variable into a section and define
@@ -338,8 +341,8 @@
// a no-op on ELF but not on Mach-O.
//
#ifndef Y_ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
-#define Y_ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
- extern char __start_##name[] Y_ABSL_ATTRIBUTE_WEAK; \
+#define Y_ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+ extern char __start_##name[] Y_ABSL_ATTRIBUTE_WEAK; \
extern char __stop_##name[] Y_ABSL_ATTRIBUTE_WEAK
#endif
#ifndef Y_ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
@@ -400,6 +403,9 @@
//
// Tells the compiler to warn about unused results.
//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard `[[nodiscard]]` directly over this macro.
+//
// When annotating a function, it must appear as the first part of the
// declaration or definition. The compiler will warn if the return value from
// such a function is unused:
@@ -426,9 +432,10 @@
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
//
// Note: past advice was to place the macro after the argument list.
-#if Y_ABSL_HAVE_ATTRIBUTE(nodiscard)
-#define Y_ABSL_MUST_USE_RESULT [[nodiscard]]
-#elif defined(__clang__) && Y_ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+//
+// TODO(b/176172494): Use Y_ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is
+// compliant with the stricter [[nodiscard]].
+#if defined(__clang__) && Y_ABSL_HAVE_ATTRIBUTE(warn_unused_result)
#define Y_ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define Y_ABSL_MUST_USE_RESULT
@@ -498,7 +505,7 @@
#define Y_ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
#if Y_ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
#define Y_ABSL_XRAY_LOG_ARGS(N) \
- [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+ [[clang::xray_always_instrument, clang::xray_log_args(N)]]
#else
#define Y_ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
#endif
@@ -642,6 +649,9 @@
// declarations. The macro argument is used as a custom diagnostic message (e.g.
// suggestion of a better alternative).
//
+// For code or headers that are assured to only build with C++14 and up, prefer
+// just using the standard `[[deprecated("message")]]` directly over this macro.
+//
// Examples:
//
// class Y_ABSL_DEPRECATED("Use Bar instead") Foo {...};
@@ -652,14 +662,17 @@
// Y_ABSL_DEPRECATED("Use DoThat() instead")
// void DoThis();
//
+// enum FooEnum {
+// kBar Y_ABSL_DEPRECATED("Use kBaz instead"),
+// };
+//
// Every usage of a deprecated entity will trigger a warning when compiled with
-// clang's `-Wdeprecated-declarations` option. This option is turned off by
-// default, but the warnings will be reported by clang-tidy.
-#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L
+// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain
+// turns this warning off by default, instead relying on clang-tidy to report
+// new uses of deprecated code.
+#if Y_ABSL_HAVE_ATTRIBUTE(deprecated)
#define Y_ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
-#endif
-
-#ifndef Y_ABSL_DEPRECATED
+#else
#define Y_ABSL_DEPRECATED(message)
#endif
@@ -669,9 +682,18 @@
// not compile (on supported platforms) unless the variable has a constant
// initializer. This is useful for variables with static and thread storage
// duration, because it guarantees that they will not suffer from the so-called
-// "static init order fiasco". Prefer to put this attribute on the most visible
-// declaration of the variable, if there's more than one, because code that
-// accesses the variable can then use the attribute for optimization.
+// "static init order fiasco".
+//
+// This attribute must be placed on the initializing declaration of the
+// variable. Some compilers will give a -Wmissing-constinit warning when this
+// attribute is placed on some other declaration but missing from the
+// initializing declaration.
+//
+// In some cases (notably with thread_local variables), `Y_ABSL_CONST_INIT` can
+// also be used in a non-initializing declaration to tell the compiler that a
+// variable is already initialized, reducing overhead that would otherwise be
+// incurred by a hidden guard variable. Thus annotating all declarations with
+// this attribute is recommended to potentially enhance optimization.
//
// Example:
//
@@ -680,14 +702,19 @@
// Y_ABSL_CONST_INIT static MyType my_var;
// };
//
-// MyType MyClass::my_var = MakeMyType(...);
+// Y_ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...);
+//
+// For code or headers that are assured to only build with C++20 and up, prefer
+// just using the standard `constinit` keyword directly over this macro.
//
// Note that this attribute is redundant if the variable is declared constexpr.
-#if Y_ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#if defined(__cpp_constinit) && __cpp_constinit >= 201907L
+#define Y_ABSL_CONST_INIT constinit
+#elif Y_ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
#define Y_ABSL_CONST_INIT [[clang::require_constant_initialization]]
#else
#define Y_ABSL_CONST_INIT
-#endif // Y_ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#endif
// Y_ABSL_ATTRIBUTE_PURE_FUNCTION
//
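The hunks above change how Y_ABSL_MUST_USE_RESULT, Y_ABSL_DEPRECATED and Y_ABSL_CONST_INIT expand; a short sketch of where these annotations go in user code, following the comment examples above (Config, LoadConfig and g_default_verbosity are illustrative names, not part of the patch):

#include "y_absl/base/attributes.h"

struct Config { int verbosity = 0; };

// Warns if a caller discards the returned Config.
Y_ABSL_MUST_USE_RESULT Config LoadConfig();

// Warns at call sites under -Wdeprecated-declarations.
Y_ABSL_DEPRECATED("Use LoadConfig() instead")
Config LoadConfigLegacy();

// Must be constant-initialized; with a C++20 toolchain the macro above now
// expands to the `constinit` keyword instead of the clang attribute.
Y_ABSL_CONST_INIT static int g_default_verbosity = 0;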
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
index 4ea832f441..e78b28c53b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
@@ -29,6 +29,10 @@
#include <type_traits>
#include <utility>
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit> // For std::bit_cast.
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
#include "y_absl/base/internal/identity.h"
#include "y_absl/base/macros.h"
#include "y_absl/meta/type_traits.h"
@@ -36,19 +40,6 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
-namespace internal_casts {
-
-template <class Dest, class Source>
-struct is_bitcastable
- : std::integral_constant<
- bool,
- sizeof(Dest) == sizeof(Source) &&
- type_traits_internal::is_trivially_copyable<Source>::value &&
- type_traits_internal::is_trivially_copyable<Dest>::value &&
- std::is_default_constructible<Dest>::value> {};
-
-} // namespace internal_casts
-
// implicit_cast()
//
// Performs an implicit conversion between types following the language
@@ -105,81 +96,83 @@ constexpr To implicit_cast(typename y_absl::internal::identity_t<To> to) {
// bit_cast()
//
-// Performs a bitwise cast on a type without changing the underlying bit
-// representation of that type's value. The two types must be of the same size
-// and both types must be trivially copyable. As with most casts, use with
-// caution. A `bit_cast()` might be needed when you need to temporarily treat a
-// type as some other type, such as in the following cases:
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
//
-// * Serialization (casting temporarily to `char *` for those purposes is
-// always allowed by the C++ standard)
-// * Managing the individual bits of a type within mathematical operations
-// that are not normally accessible through that type
-// * Casting non-pointer types to pointer types (casting the other way is
-// allowed by `reinterpret_cast()` but round-trips cannot occur the other
-// way).
-//
-// Example:
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
//
// float f = 3.14159265358979;
-// int i = bit_cast<int32_t>(f);
+// int i = bit_cast<int>(f);
// // i = 0x40490fdb
//
-// Casting non-pointer types to pointer types and then dereferencing them
-// traditionally produces undefined behavior.
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
//
// Example:
//
// // WRONG
-// float f = 3.14159265358979; // WRONG
-// int i = * reinterpret_cast<int*>(&f); // WRONG
+// float f = 3.14159265358979;
+// int i = reinterpret_cast<int&>(f); // Wrong
+// int j = *reinterpret_cast<int*>(&f); // Equally wrong
+// int k = *bit_cast<int*>(&f); // Equally wrong
//
-// The address-casting method produces undefined behavior according to the ISO
-// C++ specification section [basic.lval]. Roughly, this section says: if an
-// object in memory has one type, and a program accesses it with a different
-// type, the result is undefined behavior for most values of "different type".
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most values of "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
//
// Such casting results in type punning: holding an object in memory of one type
// and reading its bits back using a different type. A `bit_cast()` avoids this
-// issue by implementing its casts using `memcpy()`, which avoids introducing
-// this undefined behavior.
-//
-// NOTE: The requirements here are more strict than the bit_cast of standard
-// proposal p0476 due to the need for workarounds and lack of intrinsics.
-// Specifically, this implementation also requires `Dest` to be
-// default-constructible.
-template <
- typename Dest, typename Source,
- typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
- int>::type = 0>
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `y_absl::bit_cast` are more strict than that of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `y_absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <typename Dest, typename Source,
+ typename std::enable_if<
+ sizeof(Dest) == sizeof(Source) &&
+ type_traits_internal::is_trivially_copyable<Source>::value &&
+ type_traits_internal::is_trivially_copyable<Dest>::value
+#if !Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ && std::is_default_constructible<Dest>::value
+#endif // !Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ ,
+ int>::type = 0>
+#if Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast) && (!defined(__CUDACC__) || CUDA_VERSION >= 11010)
+inline constexpr Dest bit_cast(const Source& source) {
+ return __builtin_bit_cast(Dest, source);
+}
+#else // Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast)
inline Dest bit_cast(const Source& source) {
Dest dest;
memcpy(static_cast<void*>(std::addressof(dest)),
static_cast<const void*>(std::addressof(source)), sizeof(dest));
return dest;
}
+#endif // Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast)
-// NOTE: This overload is only picked if the requirements of bit_cast are
-// not met. It is therefore UB, but is provided temporarily as previous
-// versions of this function template were unchecked. Do not use this in
-// new code.
-template <
- typename Dest, typename Source,
- typename std::enable_if<
- !internal_casts::is_bitcastable<Dest, Source>::value,
- int>::type = 0>
-Y_ABSL_DEPRECATED(
- "y_absl::bit_cast type requirements were violated. Update the types "
- "being used such that they are the same size and are both "
- "TriviallyCopyable.")
-inline Dest bit_cast(const Source& source) {
- static_assert(sizeof(Dest) == sizeof(Source),
- "Source and destination types should have equal sizes.");
-
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
-}
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
Y_ABSL_NAMESPACE_END
} // namespace y_absl
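The rewritten bit_cast() above prefers std::bit_cast (C++20) or __builtin_bit_cast when available and only falls back to the memcpy implementation otherwise. A small sketch of the value-level use the comments describe, plus the pointer misuse they warn against (it assumes only the y_absl::bit_cast entry point shown above):

#include <cstdint>
#include "y_absl/base/casts.h"

uint32_t FloatBits(float f) {
  // OK: copies the object representation of `f` into a new uint32_t value.
  return y_absl::bit_cast<uint32_t>(f);
}

// NOT OK (see the comments above): casting to a pointer type and then
// dereferencing still accesses the float's storage through the wrong type.
//   int bad = *y_absl::bit_cast<int*>(&f);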
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
index cb0bf305c9..334555260a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
@@ -56,6 +56,25 @@
#include <cstddef>
#endif // __cplusplus
+// Y_ABSL_INTERNAL_CPLUSPLUS_LANG
+//
+// MSVC does not set the value of __cplusplus correctly, but instead uses
+// _MSVC_LANG as a stand-in.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+//
+// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at
+// times, for example:
+// https://github.com/microsoft/vscode-cpptools/issues/1770
+// https://reviews.llvm.org/D70996
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(_MSVC_LANG)
+#define Y_ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
+#elif defined(__cplusplus)
+#define Y_ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
+#endif
+
#if defined(__APPLE__)
// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
// __IPHONE_8_0.
@@ -92,7 +111,7 @@
//
// LTS releases can be obtained from
// https://github.com/abseil/abseil-cpp/releases.
-#define Y_ABSL_LTS_RELEASE_VERSION 20211102
+#define Y_ABSL_LTS_RELEASE_VERSION 20220623
#define Y_ABSL_LTS_RELEASE_PATCH_LEVEL 0
// Helper macro to convert a CPP variable to a string literal.
@@ -183,12 +202,6 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define Y_ABSL_HAVE_BUILTIN(x) 0
#endif
-#if defined(__is_identifier)
-#define Y_ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x))
-#else
-#define Y_ABSL_INTERNAL_HAS_KEYWORD(x) 0
-#endif
-
#ifdef __has_feature
#define Y_ABSL_HAVE_FEATURE(f) __has_feature(f)
#else
@@ -212,11 +225,12 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
// Y_ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
-// We assume __thread is supported on Linux when compiled with Clang or compiled
-// against libstdc++ with _GLIBCXX_HAVE_TLS defined.
+// We assume __thread is supported on Linux or Asylo when compiled with Clang or
+// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
#ifdef Y_ABSL_HAVE_TLS
#error Y_ABSL_HAVE_TLS cannot be directly set
-#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#elif (defined(__linux__) || defined(__ASYLO__)) && \
+ (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#define Y_ABSL_HAVE_TLS 1
#endif
@@ -259,19 +273,6 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
-// Y_ABSL_HAVE_SOURCE_LOCATION_CURRENT
-//
-// Indicates whether `y_absl::SourceLocation::current()` will return useful
-// information in some contexts.
-#ifndef Y_ABSL_HAVE_SOURCE_LOCATION_CURRENT
-#if Y_ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \
- Y_ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
-#define Y_ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
-#elif Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0)
-#define Y_ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
-#endif
-#endif
-
// Y_ABSL_HAVE_THREAD_LOCAL
//
// Checks whether C++11's `thread_local` storage duration specifier is
@@ -414,7 +415,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
defined(_AIX) || defined(__ros__) || defined(__native_client__) || \
defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \
defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
- defined(__HAIKU__)
+ defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
+ defined(__QNX__)
#define Y_ABSL_HAVE_MMAP 1
#endif
@@ -425,7 +427,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#ifdef Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM
#error Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
- defined(_AIX) || defined(__ros__)
+ defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \
+ defined(__NetBSD__)
#define Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
@@ -520,22 +523,41 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error "y_absl endian detection needs to be set up for your compiler"
#endif
-// macOS 10.13 and iOS 10.11 don't let you use <any>, <optional>, or <variant>
-// even though the headers exist and are publicly noted to work. See
-// https://github.com/abseil/abseil-cpp/issues/207 and
+// macOS < 10.13 and iOS < 11 don't let you use <any>, <optional>, or <variant>
+// even though the headers exist and are publicly noted to work, because the
+// libc++ shared library shipped on the system doesn't have the requisite
+// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and
// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+//
// libc++ spells out the availability requirements in the file
// llvm-project/libcxx/include/__config via the #define
// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
-#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
- ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
- (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
- (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
- (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
+//
+// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14
+// and iOS < 12 in the libc++ headers. This was corrected by
+// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
+// which subsequently made it into the XCode 12.5 release. We need to match the
+// old (incorrect) conditions when built with old XCode, but can use the
+// corrected earlier versions with new XCode.
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
+ ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \
+ (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))))
#define Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else
#define Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
@@ -700,8 +722,6 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
#endif
-#undef Y_ABSL_INTERNAL_HAS_KEYWORD
-
// Y_ABSL_DLL
//
// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)`
@@ -727,8 +747,6 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// a compiler instrumentation module and a run-time library.
#ifdef Y_ABSL_HAVE_MEMORY_SANITIZER
#error "Y_ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
-#elif defined(__SANITIZE_MEMORY__)
-#define Y_ABSL_HAVE_MEMORY_SANITIZER 1
#elif !defined(__native_client__) && Y_ABSL_HAVE_FEATURE(memory_sanitizer)
#define Y_ABSL_HAVE_MEMORY_SANITIZER 1
#endif
@@ -755,6 +773,45 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define Y_ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
+// Y_ABSL_HAVE_HWADDRESS_SANITIZER
+//
+// Hardware-Assisted AddressSanitizer (or HWASAN) is a memory error detector
+// that is even faster than ASan and can use CPU features like ARM TBI, Intel
+// LAM or AMD UAI.
+#ifdef Y_ABSL_HAVE_HWADDRESS_SANITIZER
+#error "Y_ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_HWADDRESS__)
+#define Y_ABSL_HAVE_HWADDRESS_SANITIZER 1
+#elif Y_ABSL_HAVE_FEATURE(hwaddress_sanitizer)
+#define Y_ABSL_HAVE_HWADDRESS_SANITIZER 1
+#endif
+
+// Y_ABSL_HAVE_LEAK_SANITIZER
+//
+// LeakSanitizer (or lsan) is a detector of memory leaks.
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// The macro Y_ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time
+// whether the LeakSanitizer is potentially available. However, just because the
+// LeakSanitizer is available does not mean it is active. Use the
+// always-available run-time interface in //y_absl/debugging/leak_check.h for
+// interacting with LeakSanitizer.
+#ifdef Y_ABSL_HAVE_LEAK_SANITIZER
+#error "Y_ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
+#elif defined(LEAK_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also
+// use -DLEAK_SANITIZER.
+#define Y_ABSL_HAVE_LEAK_SANITIZER 1
+// Clang standalone LeakSanitizer (-fsanitize=leak)
+#elif Y_ABSL_HAVE_FEATURE(leak_sanitizer)
+#define Y_ABSL_HAVE_LEAK_SANITIZER 1
+#elif defined(Y_ABSL_HAVE_ADDRESS_SANITIZER)
+// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer.
+#define Y_ABSL_HAVE_LEAK_SANITIZER 1
+#endif
+
// Y_ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
//
// Class template argument deduction is a language feature added in C++17.
@@ -764,4 +821,88 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define Y_ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
#endif
+// Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+//
+// Prior to C++17, static constexpr variables defined in classes required a
+// separate definition outside of the class body, for example:
+//
+// class Foo {
+// static constexpr int kBar = 0;
+// };
+// constexpr int Foo::kBar;
+//
+// In C++17, these variables defined in classes are considered inline variables,
+// and the extra declaration is redundant. Since some compilers warn on the
+// extra declarations, Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used
+// to conditionally ignore them:
+//
+// #ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// constexpr int Foo::kBar;
+// #endif
+#if defined(Y_ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ Y_ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+#define Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
+#endif
+
+// `Y_ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with
+// RTTI support.
+#ifdef Y_ABSL_INTERNAL_HAS_RTTI
+#error Y_ABSL_INTERNAL_HAS_RTTI cannot be directly set
+#elif !defined(__GNUC__) || defined(__GXX_RTTI)
+#define Y_ABSL_INTERNAL_HAS_RTTI 1
+#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
+
+// Y_ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE
+#error Y_ABSL_INTERNAL_HAVE_SSE cannot be directly set
+#elif defined(__SSE__)
+#define Y_ABSL_INTERNAL_HAVE_SSE 1
+#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
+// indicates that at least SSE was targeted with the /arch:SSE option.
+// All x86-64 processors support SSE, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define Y_ABSL_INTERNAL_HAVE_SSE 1
+#endif
+
+// Y_ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
+#error Y_ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
+#elif defined(__SSE2__)
+#define Y_ABSL_INTERNAL_HAVE_SSE2 1
+#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
+// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
+// All x86-64 processors support SSE2, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define Y_ABSL_INTERNAL_HAVE_SSE2 1
+#endif
+
+// Y_ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+//
+// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3
+// with MSVC you must either assume that the code will only ever run on CPUs
+// that support SSSE3, or use __cpuid() to detect support at runtime and fall
+// back to a non-SSSE3 implementation when SSSE3 is unsupported by the CPU.
+#ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
+#error Y_ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set
+#elif defined(__SSSE3__)
+#define Y_ABSL_INTERNAL_HAVE_SSSE3 1
+#endif
+
+// Y_ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM
+// SIMD).
+#ifdef Y_ABSL_INTERNAL_HAVE_ARM_NEON
+#error Y_ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
+#elif defined(__ARM_NEON)
+#define Y_ABSL_INTERNAL_HAVE_ARM_NEON 1
+#endif
+
#endif // Y_ABSL_BASE_CONFIG_H_
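The new Y_ABSL_INTERNAL_HAVE_SSE / SSE2 / SSSE3 / ARM_NEON macros above are purely compile-time switches and are internal to Abseil; a sketch of how such a switch is typically consumed inside the library (ZeroFill is an illustrative name, not part of the patch):

#include <cstddef>
#include <cstring>

#include "y_absl/base/config.h"

#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

void ZeroFill(void* dst, size_t n) {
#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
  // SSE2 path: write 16 zero bytes per store, then handle the tail.
  __m128i zero = _mm_setzero_si128();
  char* p = static_cast<char*>(dst);
  while (n >= 16) {
    _mm_storeu_si128(reinterpret_cast<__m128i*>(p), zero);
    p += 16;
    n -= 16;
  }
  std::memset(p, 0, n);
#else
  // Portable fallback when no SSE2 code generation was requested.
  std::memset(dst, 0, n);
#endif
}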
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc
index f12104f9e4..86bceda4d4 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.cc
@@ -25,6 +25,8 @@
#include <atomic>
#include <chrono> // NOLINT(build/c++11)
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
#include "y_absl/base/internal/unscaledcycleclock.h"
namespace y_absl {
@@ -33,44 +35,20 @@ namespace base_internal {
#if Y_ABSL_USE_UNSCALED_CYCLECLOCK
-namespace {
-
-#ifdef NDEBUG
-#ifdef Y_ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-// Not debug mode and the UnscaledCycleClock frequency is the CPU
-// frequency. Scale the CycleClock to prevent overflow if someone
-// tries to represent the time as cycles since the Unix epoch.
-static constexpr int32_t kShift = 1;
-#else
-// Not debug mode and the UnscaledCycleClock isn't operating at the
-// raw CPU frequency. There is no need to do any scaling, so don't
-// needlessly sacrifice precision.
-static constexpr int32_t kShift = 0;
-#endif
-#else
-// In debug mode use a different shift to discourage depending on a
-// particular shift value.
-static constexpr int32_t kShift = 2;
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int32_t CycleClock::kShift;
+constexpr double CycleClock::kFrequencyScale;
#endif
-static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
-static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+Y_ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
+ CycleClock::cycle_clock_source_{nullptr};
-CycleClockSourceFunc LoadCycleClockSource() {
- // Optimize for the common case (no callback) by first doing a relaxed load;
- // this is significantly faster on non-x86 platforms.
- if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
- return nullptr;
- }
- // This corresponds to the store(std::memory_order_release) in
- // CycleClockSource::Register, and makes sure that any updates made prior to
- // registering the callback are visible to this thread before the callback is
- // invoked.
- return cycle_clock_source.load(std::memory_order_acquire);
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+ // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+ CycleClock::cycle_clock_source_.store(source, std::memory_order_release);
}
-} // namespace
-
+#ifdef _WIN32
int64_t CycleClock::Now() {
auto fn = LoadCycleClockSource();
if (fn == nullptr) {
@@ -78,15 +56,7 @@ int64_t CycleClock::Now() {
}
return fn() >> kShift;
}
-
-double CycleClock::Frequency() {
- return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
-}
-
-void CycleClockSource::Register(CycleClockSourceFunc source) {
- // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
- cycle_clock_source.store(source, std::memory_order_release);
-}
+#endif
#else
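The Register()/LoadCycleClockSource() pair above is a standard release/acquire publication of a function pointer, with a relaxed fast-path load for the common "no callback registered" case. A minimal self-contained sketch of that pattern (names are illustrative, not Abseil API):

#include <atomic>
#include <cstdint>

using ClockFn = int64_t (*)();

std::atomic<ClockFn> g_source{nullptr};

void RegisterSource(ClockFn fn) {
  // release: writes made before registration become visible to any thread
  // that later observes `fn` through an acquire load.
  g_source.store(fn, std::memory_order_release);
}

int64_t ReadClock(int64_t (*fallback)()) {
  // Cheap relaxed check first; only pay for the acquire load (and the
  // synchronization it implies) when a source has actually been registered.
  if (g_source.load(std::memory_order_relaxed) == nullptr) return fallback();
  return g_source.load(std::memory_order_acquire)();
}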
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h
index 9777d214da..e75b28d1ad 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/cycleclock.h
@@ -42,14 +42,19 @@
#ifndef Y_ABSL_BASE_INTERNAL_CYCLECLOCK_H_
#define Y_ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#include <atomic>
#include <cstdint>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
+#include "y_absl/base/internal/unscaledcycleclock.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace base_internal {
+using CycleClockSourceFunc = int64_t (*)();
+
// -----------------------------------------------------------------------------
// CycleClock
// -----------------------------------------------------------------------------
@@ -68,12 +73,37 @@ class CycleClock {
static double Frequency();
private:
+#if Y_ABSL_USE_UNSCALED_CYCLECLOCK
+ static CycleClockSourceFunc LoadCycleClockSource();
+
+#ifdef NDEBUG
+#ifdef Y_ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+ // Not debug mode and the UnscaledCycleClock frequency is the CPU
+ // frequency. Scale the CycleClock to prevent overflow if someone
+ // tries to represent the time as cycles since the Unix epoch.
+ static constexpr int32_t kShift = 1;
+#else
+ // Not debug mode and the UnscaledCycleClock isn't operating at the
+ // raw CPU frequency. There is no need to do any scaling, so don't
+ // needlessly sacrifice precision.
+ static constexpr int32_t kShift = 0;
+#endif
+#else // NDEBUG
+ // In debug mode use a different shift to discourage depending on a
+ // particular shift value.
+ static constexpr int32_t kShift = 2;
+#endif // NDEBUG
+
+ static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+ Y_ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif // Y_ABSL_USE_UNSCALED_CYCLECLOCK
+
CycleClock() = delete; // no instances
CycleClock(const CycleClock&) = delete;
CycleClock& operator=(const CycleClock&) = delete;
-};
-using CycleClockSourceFunc = int64_t (*)();
+ friend class CycleClockSource;
+};
class CycleClockSource {
private:
@@ -87,6 +117,41 @@ class CycleClockSource {
static void Register(CycleClockSourceFunc source);
};
+#if Y_ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+ // Optimize for the common case (no callback) by first doing a relaxed load;
+ // this is significantly faster on non-x86 platforms.
+ if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+ return nullptr;
+ }
+#endif // !defined(__x86_64__)
+
+ // This corresponds to the store(std::memory_order_release) in
+ // CycleClockSource::Register, and makes sure that any updates made prior to
+ // registering the callback are visible to this thread before the callback
+ // is invoked.
+ return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32
+inline int64_t CycleClock::Now() {
+ auto fn = LoadCycleClockSource();
+ if (fn == nullptr) {
+ return base_internal::UnscaledCycleClock::Now() >> kShift;
+ }
+ return fn() >> kShift;
+}
+#endif
+
+inline double CycleClock::Frequency() {
+ return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#endif // Y_ABSL_USE_UNSCALED_CYCLECLOCK
+
} // namespace base_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
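CycleClock::Now() and CycleClock::Frequency() above stay consistent with each other through the kShift/kFrequencyScale pair, so elapsed time is their ratio. A sketch of that computation (CycleClock is an internal API; ElapsedSeconds is an illustrative helper, not part of the patch):

#include <cstdint>

#include "y_absl/base/internal/cycleclock.h"

double ElapsedSeconds(void (*work)()) {
  using y_absl::base_internal::CycleClock;
  int64_t start = CycleClock::Now();
  work();
  int64_t stop = CycleClock::Now();
  // Frequency() reports ticks per second in the same (possibly shifted)
  // time base as Now(), so the ratio is seconds.
  return static_cast<double>(stop - start) / CycleClock::Frequency();
}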
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
index 805ae82873..5b7750628a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
@@ -20,7 +20,7 @@
#include "y_absl/base/config.h"
-#if Y_ABSL_HAVE_MMAP
+#ifdef Y_ABSL_HAVE_MMAP
#include <sys/mman.h>
@@ -41,13 +41,13 @@
#ifdef __mips__
// Include definitions of the ABI currently in use.
-#ifdef __BIONIC__
+#if defined(__BIONIC__) || !defined(__GLIBC__)
// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
// definitions we need.
#include <asm/sgidefs.h>
#else
#include <sgidefs.h>
-#endif // __BIONIC__
+#endif // __BIONIC__ || !__GLIBC__
#endif // __mips__
// SYS_mmap and SYS_munmap are not defined in Android.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
index d450704225..e8ac2d6c3d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
@@ -16,16 +16,9 @@
#ifndef Y_ABSL_BASE_INTERNAL_ENDIAN_H_
#define Y_ABSL_BASE_INTERNAL_ENDIAN_H_
-// The following guarantees declaration of the byte swap functions
-#ifdef _MSC_VER
-#include <stdlib.h> // NOLINT(build/include)
-#elif defined(__FreeBSD__)
-#include <sys/endian.h>
-#elif defined(__GLIBC__)
-#include <byteswap.h> // IWYU pragma: export
-#endif
-
#include <cstdint>
+#include <cstdlib>
+
#include "y_absl/base/casts.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/unaligned_access.h"
@@ -34,47 +27,11 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
-// Use compiler byte-swapping intrinsics if they are available. 32-bit
-// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
-// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
-// For simplicity, we enable them all only for GCC 4.8.0 or later.
-#if defined(__clang__) || \
- (defined(__GNUC__) && \
- ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
inline uint64_t gbswap_64(uint64_t host_int) {
+#if Y_ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
return __builtin_bswap64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
- return __builtin_bswap32(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
- return __builtin_bswap16(host_int);
-}
-
#elif defined(_MSC_VER)
-inline uint64_t gbswap_64(uint64_t host_int) {
return _byteswap_uint64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
- return _byteswap_ulong(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
- return _byteswap_ushort(host_int);
-}
-
-#else
-inline uint64_t gbswap_64(uint64_t host_int) {
-#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
- // Adapted from /usr/include/byteswap.h. Not available on Mac.
- if (__builtin_constant_p(host_int)) {
- return __bswap_constant_64(host_int);
- } else {
- uint64_t result;
- __asm__("bswap %0" : "=r"(result) : "0"(host_int));
- return result;
- }
-#elif defined(__GLIBC__)
- return bswap_64(host_int);
#else
return (((host_int & uint64_t{0xFF}) << 56) |
((host_int & uint64_t{0xFF00}) << 40) |
@@ -84,12 +41,14 @@ inline uint64_t gbswap_64(uint64_t host_int) {
((host_int & uint64_t{0xFF0000000000}) >> 24) |
((host_int & uint64_t{0xFF000000000000}) >> 40) |
((host_int & uint64_t{0xFF00000000000000}) >> 56));
-#endif // bswap_64
+#endif
}
inline uint32_t gbswap_32(uint32_t host_int) {
-#if defined(__GLIBC__)
- return bswap_32(host_int);
+#if Y_ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
+ return __builtin_bswap32(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_ulong(host_int);
#else
return (((host_int & uint32_t{0xFF}) << 24) |
((host_int & uint32_t{0xFF00}) << 8) |
@@ -99,33 +58,29 @@ inline uint32_t gbswap_32(uint32_t host_int) {
}
inline uint16_t gbswap_16(uint16_t host_int) {
-#if defined(__GLIBC__)
- return bswap_16(host_int);
+#if Y_ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
+ return __builtin_bswap16(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_ushort(host_int);
#else
return (((host_int & uint16_t{0xFF}) << 8) |
((host_int & uint16_t{0xFF00}) >> 8));
#endif
}
-#endif // intrinsics available
-
#ifdef Y_ABSL_IS_LITTLE_ENDIAN
-// Definitions for ntohl etc. that don't require us to include
-// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
-// than just #defining them because in debug mode, gcc doesn't
-// correctly handle the (rather involved) definitions of bswap_32.
-// gcc guarantees that inline functions are as fast as macros, so
-// this isn't a performance hit.
+// Portable definitions for htonl (host-to-network) and friends on little-endian
+// architectures.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
#elif defined Y_ABSL_IS_BIG_ENDIAN
-// These definitions are simpler on big-endian machines
-// These are functions instead of macros to avoid self-assignment warnings
-// on calls such as "i = ghtnol(i);". This also provides type checking.
+// Portable definitions for htonl (host-to-network) etc on big-endian
+// architectures. These definitions are simpler since the host byte order is the
+// same as network byte order.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }
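The gbswap_*() rewrites above reduce everything to the compiler builtins (or MSVC intrinsics) plus a portable shift-and-mask fallback; a small standalone check of what that fallback computes (plain C++, no Abseil dependency):

#include <cstdint>

// Same shift/mask arithmetic as the portable branch of gbswap_32() above.
constexpr uint32_t SwapBytes32(uint32_t x) {
  return ((x & uint32_t{0xFF}) << 24) | ((x & uint32_t{0xFF00}) << 8) |
         ((x & uint32_t{0xFF0000}) >> 8) | ((x & uint32_t{0xFF000000}) >> 24);
}

static_assert(SwapBytes32(0x11223344u) == 0x44332211u,
              "byte swapping reverses the order of the four bytes");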
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
index 4fa0cf8ea6..a99371c091 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
@@ -28,8 +28,10 @@ struct FastTypeTag {
constexpr static char dummy_var = 0;
};
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename Type>
constexpr char FastTypeTag<Type>::dummy_var;
+#endif
// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
// passed-in type. These are meant to be good match for keys into maps or
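FastTypeTag<Type>::dummy_var above exists only so that its address serves as a unique, link-time-constant key per type; the redundant out-of-class definition is now guarded for pre-C++17 builds. A sketch of the comparison FastTypeId() enables (internal API, shown for illustration only):

#include "y_absl/base/internal/fast_type_id.h"

struct A {};
struct B {};

bool FastTypeIdDemo() {
  using y_absl::base_internal::FastTypeId;
  // Each instantiation owns a distinct dummy_var, so the ids compare equal
  // exactly when the types are the same.
  return FastTypeId<A>() == FastTypeId<A>() &&
         FastTypeId<A>() != FastTypeId<B>();
}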
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
index 5d5348c377..16a222ce5c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
@@ -14,6 +14,8 @@
//
// y_absl::base_internal::invoke(f, args...) is an implementation of
// INVOKE(f, args...) from section [func.require] of the C++ standard.
+// When compiled as C++17 and later versions, it is implemented as an alias of
+// std::invoke.
//
// [func.require]
// Define INVOKE (f, t1, t2, ..., tN) as follows:
@@ -35,6 +37,26 @@
#ifndef Y_ABSL_BASE_INTERNAL_INVOKE_H_
#define Y_ABSL_BASE_INTERNAL_INVOKE_H_
+#include "y_absl/base/config.h"
+
+#if Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
+#include <functional>
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using std::invoke;
+using std::invoke_result_t;
+using std::is_invocable_r;
+
+} // namespace base_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#else // Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
#include <algorithm>
#include <type_traits>
#include <utility>
@@ -80,8 +102,18 @@ struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
static decltype((std::declval<Obj>().*
std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+// Ignore bogus GCC warnings on this line.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example.
+#if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return (std::forward<Obj>(obj).*
std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+#if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic pop
+#endif
}
};
@@ -180,8 +212,30 @@ invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...);
}
+
+template <typename AlwaysVoid, typename, typename, typename...>
+struct IsInvocableRImpl : std::false_type {};
+
+template <typename R, typename F, typename... Args>
+struct IsInvocableRImpl<
+ y_absl::void_t<y_absl::base_internal::invoke_result_t<F, Args...> >, R, F,
+ Args...>
+ : std::integral_constant<
+ bool,
+ std::is_convertible<y_absl::base_internal::invoke_result_t<F, Args...>,
+ R>::value ||
+ std::is_void<R>::value> {};
+
+// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
+// and either the return type is convertible to `R`, or `R` is void.
+// C++11-compatible version of `std::is_invocable_r`.
+template <typename R, typename F, typename... Args>
+using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;
+
} // namespace base_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
+#endif // Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
#endif // Y_ABSL_BASE_INTERNAL_INVOKE_H_
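A small usage sketch of these internal helpers (illustrative only): with a C++17 toolchain they resolve to the std facilities aliased above, otherwise to the C++11 backport below.

#include "y_absl/base/internal/invoke.h"

struct Widget {
  int Size() const { return 42; }
};

static_assert(y_absl::base_internal::is_invocable_r<
                  int, decltype(&Widget::Size), const Widget&>::value,
              "pointer-to-member is invocable on a const Widget&");

int CallSize(const Widget& w) {
  // INVOKE semantics: dispatches the pointer-to-member on `w`.
  return y_absl::base_internal::invoke(&Widget::Size, w);
}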
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h
new file mode 100644
index 0000000000..0c9c358570
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h
@@ -0,0 +1,138 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef Y_ABSL_BASE_INTERNAL_PREFETCH_H_
+#define Y_ABSL_BASE_INTERNAL_PREFETCH_H_
+
+#include "y_absl/base/config.h"
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && defined(Y_ABSL_INTERNAL_HAVE_SSE)
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+// Compatibility wrappers around __builtin_prefetch, to prefetch data
+// for read if supported by the toolchain.
+
+// Move data into the cache before it is read, or "prefetch" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// The function names specify the temporal locality heuristic applied,
+// using the names of Intel prefetch instructions:
+//
+// T0 - high degree of temporal locality; data should be left in as
+// many levels of the cache as possible
+// T1 - moderate degree of temporal locality
+// T2 - low degree of temporal locality
+// Nta - no temporal locality, data need not be left in the cache
+// after the read
+//
+// Incorrect or gratuitous use of these functions can degrade
+// performance, so use them only when representative benchmarks show
+// an improvement.
+//
+// Example usage:
+//
+// y_absl::base_internal::PrefetchT0(addr);
+//
+// Currently, the different prefetch calls behave on some Intel
+// architectures as follows:
+//
+// SNB..SKL SKX
+// PrefetchT0() L1/L2/L3 L1/L2
+// PrefetchT1() L2/L3 L2
+// PrefetchT2() L2/L3 L2
+// PrefetchNta() L1/--/L3 L1*
+//
+// * On SKX PrefetchNta() will bring the line into L1 but will evict it
+//   from the L3 cache. This might result in surprising behavior.
+//
+// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon.
+//
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+void PrefetchT0(const void* addr);
+void PrefetchT1(const void* addr);
+void PrefetchT2(const void* addr);
+void PrefetchNta(const void* addr);
+
+// Implementation details follow.
+
+#if Y_ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define Y_ABSL_INTERNAL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+// These functions speculatively load for read only. This is
+// safe for all currently supported platforms. However, prefetch for
+// store may have problems depending on the target platform.
+//
+inline void PrefetchT0(const void* addr) {
+ // Note: this uses prefetcht0 on Intel.
+ __builtin_prefetch(addr, 0, 3);
+}
+inline void PrefetchT1(const void* addr) {
+ // Note: this uses prefetcht1 on Intel.
+ __builtin_prefetch(addr, 0, 2);
+}
+inline void PrefetchT2(const void* addr) {
+ // Note: this uses prefetcht2 on Intel.
+ __builtin_prefetch(addr, 0, 1);
+}
+inline void PrefetchNta(const void* addr) {
+ // Note: this uses prefetchnta on Intel.
+ __builtin_prefetch(addr, 0, 0);
+}
+
+#elif defined(Y_ABSL_INTERNAL_HAVE_SSE)
+
+#define Y_ABSL_INTERNAL_HAVE_PREFETCH 1
+
+inline void PrefetchT0(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
+}
+inline void PrefetchT1(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
+}
+inline void PrefetchT2(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
+}
+inline void PrefetchNta(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
+}
+
+#else
+inline void PrefetchT0(const void*) {}
+inline void PrefetchT1(const void*) {}
+inline void PrefetchT2(const void*) {}
+inline void PrefetchNta(const void*) {}
+#endif
+
+} // namespace base_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_BASE_INTERNAL_PREFETCH_H_
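A hedged usage sketch for the new wrappers (illustrative only; the lookahead of 8 elements is an arbitrary choice, and any speedup is workload- and hardware-dependent, so benchmark before relying on it):

#include <cstddef>

#include "y_absl/base/internal/prefetch.h"

long SumWithPrefetch(const long* data, size_t n) {
  long total = 0;
  for (size_t i = 0; i < n; ++i) {
    // Hint that data[i + 8] will be read soon; compiles to a no-op on
    // toolchains without prefetch support (see the fallbacks above).
    if (i + 8 < n) y_absl::base_internal::PrefetchT0(data + i + 8);
    total += data[i];
  }
  return total;
}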
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
index 75af4543d5..1a5a18f804 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
@@ -14,15 +14,17 @@
#include "y_absl/base/internal/raw_logging.h"
-#include <stddef.h>
#include <cstdarg>
+#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
+#include <util/generic/string.h>
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/atomic_hook.h"
+#include "y_absl/base/internal/errno_saver.h"
#include "y_absl/base/log_severity.h"
// We know how to perform low-level writes to stderr in POSIX and Windows. For
@@ -36,8 +38,8 @@
// This preprocessor token is also defined in raw_io.cc. If you need to copy
// this, consider moving both to config.h instead.
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
- defined(__Fuchsia__) || defined(__native_client__) || \
- defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+ defined(__Fuchsia__) || defined(__native_client__) || \
+ defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__)
#include <unistd.h>
@@ -50,7 +52,8 @@
// Y_ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
// for low level operations that want to avoid libc.
-#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \
+ !defined(__ANDROID__)
#include <sys/syscall.h>
#define Y_ABSL_HAVE_SYSCALL_WRITE 1
#define Y_ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
@@ -76,13 +79,6 @@ namespace {
// Explicitly `#error` out when not `Y_ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
// a selected set of platforms for which we expect not to be able to raw log.
-Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- y_absl::base_internal::AtomicHook<LogPrefixHook>
- log_prefix_hook;
-Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- y_absl::base_internal::AtomicHook<AbortHook>
- abort_hook;
-
#ifdef Y_ABSL_LOW_LEVEL_WRITE_SUPPORTED
constexpr char kTruncated[] = " ... (message truncated)\n";
@@ -130,6 +126,18 @@ bool DoRawLog(char** buf, int* size, const char* format, ...) {
return true;
}
+bool DefaultLogFilterAndPrefix(y_absl::LogSeverity, const char* file, int line,
+ char** buf, int* buf_size) {
+ DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line);
+ return true;
+}
+
+Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+y_absl::base_internal::AtomicHook<LogFilterAndPrefixHook>
+ log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);
+Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+y_absl::base_internal::AtomicHook<AbortHook> abort_hook;
+
void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
const char* format, va_list ap) Y_ABSL_PRINTF_ATTRIBUTE(4, 0);
void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
@@ -150,14 +158,7 @@ void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
}
#endif
- auto log_prefix_hook_ptr = log_prefix_hook.Load();
- if (log_prefix_hook_ptr) {
- enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
- } else {
- if (enabled) {
- DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
- }
- }
+ enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);
const char* const prefix_end = buf;
#ifdef Y_ABSL_LOW_LEVEL_WRITE_SUPPORTED
@@ -168,11 +169,12 @@ void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- SafeWriteToStderr(buffer, strlen(buffer));
+ AsyncSignalSafeWriteToStderr(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
static_cast<void>(ap);
+ static_cast<void>(enabled);
#endif
// Abort the process after logging a FATAL message, even if the output itself
@@ -195,8 +197,11 @@ void DefaultInternalLog(y_absl::LogSeverity severity, const char* file, int line
} // namespace
-void SafeWriteToStderr(const char *s, size_t len) {
+void AsyncSignalSafeWriteToStderr(const char* s, size_t len) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
#if defined(Y_ABSL_HAVE_SYSCALL_WRITE)
+ // We prefer calling write via `syscall` to minimize the risk of libc doing
+ // something "helpful".
syscall(SYS_write, STDERR_FILENO, s, len);
#elif defined(Y_ABSL_HAVE_POSIX_WRITE)
write(STDERR_FILENO, s, len);
@@ -229,7 +234,9 @@ Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES Y_ABSL_DLL
y_absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
-void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {
+ log_filter_and_prefix_hook.Store(func);
+}
void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
index 8febc13fc3..56cb5dc363 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
@@ -109,12 +109,9 @@ namespace raw_logging_internal {
void RawLog(y_absl::LogSeverity severity, const char* file, int line,
const char* format, ...) Y_ABSL_PRINTF_ATTRIBUTE(4, 5);
-// Writes the provided buffer directly to stderr, in a safe, low-level manner.
-//
-// In POSIX this means calling write(), which is async-signal safe and does
-// not malloc. If the platform supports the SYS_write syscall, we invoke that
-// directly to side-step any libc interception.
-void SafeWriteToStderr(const char *s, size_t len);
+// Writes the provided buffer directly to stderr, in a signal-safe, low-level
+// manner.
+void AsyncSignalSafeWriteToStderr(const char* s, size_t len);
// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
@@ -148,11 +145,12 @@ bool RawLoggingFullySupported();
// 'severity' is the severity level of the message being written.
// 'file' and 'line' are the file and line number where the Y_ABSL_RAW_LOG macro
// was located.
-// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the
-// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the
+// hook writes a prefix, it must increment *buf and decrement *buf_size
// accordingly.
-using LogPrefixHook = bool (*)(y_absl::LogSeverity severity, const char* file,
- int line, char** buffer, int* buf_size);
+using LogFilterAndPrefixHook = bool (*)(y_absl::LogSeverity severity,
+ const char* file, int line, char** buf,
+ int* buf_size);
// Function type for a raw_logging customization hook called to abort a process
// when a FATAL message is logged. If the provided AbortHook() returns, the
@@ -162,7 +160,10 @@ using LogPrefixHook = bool (*)(y_absl::LogSeverity severity, const char* file,
// was located.
// The NUL-terminated logged message lives in the buffer between 'buf_start'
// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
-// buffer (as written by the LogPrefixHook.)
+// buffer (as written by the LogFilterAndPrefixHook.)
+//
+// The lifetime of the filename and message buffers will not end while the
+// process remains alive.
using AbortHook = void (*)(const char* file, int line, const char* buf_start,
const char* prefix_end, const char* buf_end);
@@ -184,7 +185,7 @@ Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES Y_ABSL_DLL extern base_internal::AtomicHo
//
// These functions are safe to call at any point during initialization; they do
// not block or malloc, and are async-signal safe.
-void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);
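A sketch of how the renamed hook can be installed; the filter, threshold, and prefix format below are hypothetical, not part of Abseil. Note that returning false now suppresses the message entirely, since the hook's result feeds the `enabled` flag in RawLogVA.

#include <cstddef>
#include <cstdio>

#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/log_severity.h"

// Hypothetical hook: drop raw-log output below kError and write a prefix.
bool MyFilterAndPrefix(y_absl::LogSeverity severity, const char* file,
                       int line, char** buf, int* buf_size) {
  if (severity < y_absl::LogSeverity::kError) return false;  // filtered out
  int n = std::snprintf(*buf, static_cast<std::size_t>(*buf_size), "%s:%d| ",
                        file, line);
  if (n > 0 && n < *buf_size) {
    *buf += n;       // per the contract: advance past the written prefix
    *buf_size -= n;  // and shrink the remaining capacity
  }
  return true;
}

void InstallRawLogHook() {
  y_absl::raw_logging_internal::RegisterLogFilterAndPrefixHook(
      &MyFilterAndPrefix);
}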
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
index efffecdb7e..aaa8b3c156 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
@@ -19,6 +19,7 @@
#include <limits>
#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
#include "y_absl/base/internal/atomic_hook.h"
#include "y_absl/base/internal/cycleclock.h"
#include "y_absl/base/internal/spinlock_wait.h"
@@ -66,12 +67,14 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
submit_profile_data.Store(fn);
}
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
// Static member variable definitions.
constexpr uint32_t SpinLock::kSpinLockHeld;
constexpr uint32_t SpinLock::kSpinLockCooperative;
constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
constexpr uint32_t SpinLock::kSpinLockSleeper;
constexpr uint32_t SpinLock::kWaitTimeMask;
+#endif
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
index a570699d68..67bbbabdca 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
@@ -120,6 +120,14 @@ class Y_ABSL_LOCKABLE SpinLock {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}
+ // Return immediately if this thread holds the SpinLock exclusively.
+ // Otherwise, report an error by crashing with a diagnostic.
+ inline void AssertHeld() const Y_ABSL_ASSERT_EXCLUSIVE_LOCK() {
+ if (!IsHeld()) {
+ Y_ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+ }
+ }
+
protected:
// These should not be exported except for testing.
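A minimal sketch of the new check (SpinLock is an Abseil-internal type; the helper below is hypothetical):

#include "y_absl/base/internal/spinlock.h"

// Precondition: the calling thread must already hold `lock`.
void TouchGuardedState(y_absl::base_internal::SpinLock& lock) {
  lock.AssertHeld();  // crashes with a RAW_LOG(FATAL) diagnostic if not held
  // ... mutate state guarded by `lock` ...
}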
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
index 5d2bbbcd15..dc654ab6e1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
@@ -57,13 +57,10 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
extern "C" {
Y_ABSL_ATTRIBUTE_WEAK void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t> *w, uint32_t value, int loop,
+ std::atomic<uint32_t> *w, uint32_t value, int,
y_absl::base_internal::SchedulingMode) {
y_absl::base_internal::ErrnoSaver errno_saver;
- struct timespec tm;
- tm.tv_sec = 0;
- tm.tv_nsec = y_absl::base_internal::SpinLockSuggestedDelayNS(loop);
- syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+ syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
}
Y_ABSL_ATTRIBUTE_WEAK void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
index 6a1ef60711..25b6a005bb 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
@@ -124,7 +124,6 @@ int Win32NumCPUs() {
} // namespace
-
static int GetNumCPUs() {
#if defined(__myriad2__)
return 1;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
index 9e67f0b964..7f5c39a7d1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
@@ -14,7 +14,7 @@
#include "y_absl/base/internal/thread_identity.h"
-#ifndef _WIN32
+#if !defined(_WIN32) || defined(__MINGW32__)
#include <pthread.h>
#include <signal.h>
#endif
@@ -56,6 +56,7 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// *different* instances of this ptr.
// Apple platforms have the visibility attribute, but issue a compile warning
// that protected visibility is unsupported.
+Y_ABSL_CONST_INIT // Must come before __attribute__((visibility("protected")))
#if Y_ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected")))
#endif // Y_ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
index dcbcb0c24d..4f959fadef 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
@@ -24,8 +24,13 @@
#ifdef __GLIBC__
#include <sys/platform/ppc.h>
#elif defined(__FreeBSD__)
-#include <sys/sysctl.h>
+// clang-format off
+// This order does actually matter =(.
#include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+
+#include "y_absl/base/call_once.h"
#endif
#endif
@@ -49,12 +54,6 @@ double UnscaledCycleClock::Frequency() {
#elif defined(__x86_64__)
-int64_t UnscaledCycleClock::Now() {
- uint64_t low, high;
- __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- return (high << 32) | low;
-}
-
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
index fd82752f78..15ee5e4434 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
@@ -47,7 +47,7 @@
// The following platforms have an implementation of a hardware counter.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
- defined(_M_IX86) || defined(_M_X64)
+ defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
#define Y_ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
#else
#define Y_ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
@@ -59,8 +59,7 @@
// CycleClock that runs at at least 1 MHz. We've found some Android
// ARM64 devices where this is not the case, so we disable it by
// default on Android ARM64.
-#if defined(__native_client__) || \
- (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
+#if defined(__native_client__) || (defined(__APPLE__)) || \
(defined(__ANDROID__) && defined(__aarch64__))
#define Y_ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
#else
@@ -115,6 +114,16 @@ class UnscaledCycleClock {
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
};
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+}
+
+#endif
+
} // namespace base_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc
index 3212ba04c9..890fa4eb6a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.cc
@@ -16,6 +16,8 @@
#include <ostream>
+#include "y_absl/base/attributes.h"
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
@@ -23,5 +25,31 @@ std::ostream& operator<<(std::ostream& os, y_absl::LogSeverity s) {
if (s == y_absl::NormalizeLogSeverity(s)) return os << y_absl::LogSeverityName(s);
return os << "y_absl::LogSeverity(" << static_cast<int>(s) << ")";
}
+
+std::ostream& operator<<(std::ostream& os, y_absl::LogSeverityAtLeast s) {
+ switch (s) {
+ case y_absl::LogSeverityAtLeast::kInfo:
+ case y_absl::LogSeverityAtLeast::kWarning:
+ case y_absl::LogSeverityAtLeast::kError:
+ case y_absl::LogSeverityAtLeast::kFatal:
+ return os << ">=" << static_cast<y_absl::LogSeverity>(s);
+ case y_absl::LogSeverityAtLeast::kInfinity:
+ return os << "INFINITY";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, y_absl::LogSeverityAtMost s) {
+ switch (s) {
+ case y_absl::LogSeverityAtMost::kInfo:
+ case y_absl::LogSeverityAtMost::kWarning:
+ case y_absl::LogSeverityAtMost::kError:
+ case y_absl::LogSeverityAtMost::kFatal:
+ return os << "<=" << static_cast<y_absl::LogSeverity>(s);
+ case y_absl::LogSeverityAtMost::kNegativeInfinity:
+ return os << "NEGATIVE_INFINITY";
+ }
+ return os;
+}
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h
index 07ee164f45..1fc55e8223 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/log_severity.h
@@ -115,6 +115,57 @@ constexpr y_absl::LogSeverity NormalizeLogSeverity(int s) {
// unspecified; do not rely on it.
std::ostream& operator<<(std::ostream& os, y_absl::LogSeverity s);
+// Enums representing a lower bound for LogSeverity. APIs that only operate on
+// messages of at least a certain level (for example, `SetMinLogLevel()`) use
+// this type to specify that level. y_absl::LogSeverityAtLeast::kInfinity is
+// a level above all threshold levels and therefore no log message will
+// ever meet this threshold.
+enum class LogSeverityAtLeast : int {
+ kInfo = static_cast<int>(y_absl::LogSeverity::kInfo),
+ kWarning = static_cast<int>(y_absl::LogSeverity::kWarning),
+ kError = static_cast<int>(y_absl::LogSeverity::kError),
+ kFatal = static_cast<int>(y_absl::LogSeverity::kFatal),
+ kInfinity = 1000,
+};
+
+std::ostream& operator<<(std::ostream& os, y_absl::LogSeverityAtLeast s);
+
+// Enums representing an upper bound for LogSeverity. APIs that only operate on
+// messages of at most a certain level (for example, buffer all messages at or
+// below a certain level) use this type to specify that level.
+// y_absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
+// levels and therefore will exclude all log messages.
+enum class LogSeverityAtMost : int {
+ kNegativeInfinity = -1000,
+ kInfo = static_cast<int>(y_absl::LogSeverity::kInfo),
+ kWarning = static_cast<int>(y_absl::LogSeverity::kWarning),
+ kError = static_cast<int>(y_absl::LogSeverity::kError),
+ kFatal = static_cast<int>(y_absl::LogSeverity::kFatal),
+};
+
+std::ostream& operator<<(std::ostream& os, y_absl::LogSeverityAtMost s);
+
+#define COMPOP(op1, op2, T) \
+ constexpr bool operator op1(y_absl::T lhs, y_absl::LogSeverity rhs) { \
+ return static_cast<y_absl::LogSeverity>(lhs) op1 rhs; \
+ } \
+ constexpr bool operator op2(y_absl::LogSeverity lhs, y_absl::T rhs) { \
+ return lhs op2 static_cast<y_absl::LogSeverity>(rhs); \
+ }
+
+// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
+// `LogSeverityAtMost` are only supported in one direction.
+// Valid checks are:
+// LogSeverity >= LogSeverityAtLeast
+// LogSeverity < LogSeverityAtLeast
+// LogSeverity <= LogSeverityAtMost
+// LogSeverity > LogSeverityAtMost
+COMPOP(>, <, LogSeverityAtLeast)
+COMPOP(<=, >=, LogSeverityAtLeast)
+COMPOP(<, >, LogSeverityAtMost)
+COMPOP(>=, <=, LogSeverityAtMost)
+#undef COMPOP
+
Y_ABSL_NAMESPACE_END
} // namespace y_absl
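A short sketch of the one-directional comparisons the COMPOP block enables (the helper is hypothetical):

#include "y_absl/base/log_severity.h"

// Supported direction: LogSeverity >= LogSeverityAtLeast. The reverse
// orderings are deliberately not defined.
bool MeetsThreshold(y_absl::LogSeverity s, y_absl::LogSeverityAtLeast min) {
  return s >= min;  // e.g. kError >= LogSeverityAtLeast::kWarning -> true
}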
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h
index 72cf6ef8a2..024b5c163c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/optimization.h
@@ -181,35 +181,43 @@
#define Y_ABSL_PREDICT_TRUE(x) (x)
#endif
-// Y_ABSL_INTERNAL_ASSUME(cond)
+// Y_ABSL_ASSUME(cond)
+//
// Informs the compiler that a condition is always true and that it can assume
-// it to be true for optimization purposes. The call has undefined behavior if
-// the condition is false.
+// it to be true for optimization purposes.
+//
+// WARNING: If the condition is false, the program can produce undefined and
+// potentially dangerous behavior.
+//
// In !NDEBUG mode, the condition is checked with an assert().
-// NOTE: The expression must not have side effects, as it will only be evaluated
-// in some compilation modes and not others.
+//
+// NOTE: The expression must not have side effects, as it may only be evaluated
+// in some compilation modes and not others. Some compilers may issue a warning
+// if the compiler cannot prove the expression has no side effects. For example,
+// the expression should not use a function call since the compiler cannot prove
+// that a function call does not have side effects.
//
// Example:
//
// int x = ...;
-// Y_ABSL_INTERNAL_ASSUME(x >= 0);
+// Y_ABSL_ASSUME(x >= 0);
// // The compiler can optimize the division to a simple right shift using the
// // assumption specified above.
// int y = x / 16;
//
#if !defined(NDEBUG)
-#define Y_ABSL_INTERNAL_ASSUME(cond) assert(cond)
+#define Y_ABSL_ASSUME(cond) assert(cond)
#elif Y_ABSL_HAVE_BUILTIN(__builtin_assume)
-#define Y_ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond)
+#define Y_ABSL_ASSUME(cond) __builtin_assume(cond)
#elif defined(__GNUC__) || Y_ABSL_HAVE_BUILTIN(__builtin_unreachable)
-#define Y_ABSL_INTERNAL_ASSUME(cond) \
+#define Y_ABSL_ASSUME(cond) \
do { \
if (!(cond)) __builtin_unreachable(); \
} while (0)
#elif defined(_MSC_VER)
-#define Y_ABSL_INTERNAL_ASSUME(cond) __assume(cond)
+#define Y_ABSL_ASSUME(cond) __assume(cond)
#else
-#define Y_ABSL_INTERNAL_ASSUME(cond) \
+#define Y_ABSL_ASSUME(cond) \
do { \
static_cast<void>(false && (cond)); \
} while (0)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
index 05f05f9410..40601c2bee 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
@@ -206,7 +206,7 @@
// allowed.
#define Y_ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define Y_ABSL_OPTION_INLINE_NAMESPACE_NAME lts_y_20211102
+#define Y_ABSL_OPTION_INLINE_NAMESPACE_NAME lts_y_20220623
// Y_ABSL_OPTION_HARDENED
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h
index 25427b419a..864784143c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/thread_annotations.h
@@ -154,8 +154,8 @@
// Y_ABSL_LOCKS_EXCLUDED()
//
-// Documents the locks acquired in the body of the function. These locks
-// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// Documents the locks that cannot be held by callers of this function, as they
+// might be acquired by this function (Abseil's `Mutex` locks are
// non-reentrant).
#if Y_ABSL_HAVE_ATTRIBUTE(locks_excluded)
#define Y_ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
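The reworded contract, as a generic sketch (not taken from this patch): the annotated function acquires the mutex itself, so callers must not hold it when calling.

#include "y_absl/base/thread_annotations.h"
#include "y_absl/synchronization/mutex.h"

class Counter {
 public:
  // Acquires mu_ internally; callers must not already hold it.
  void Increment() Y_ABSL_LOCKS_EXCLUDED(mu_) {
    y_absl::MutexLock l(&mu_);
    ++value_;
  }

 private:
  y_absl::Mutex mu_;
  int value_ Y_ABSL_GUARDED_BY(mu_) = 0;
};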
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
index 8fb4c679dc..94c69145fa 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
@@ -489,12 +489,14 @@ class FixedArray {
Storage storage_;
};
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T, size_t N, typename A>
constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
template <typename T, size_t N, typename A>
constexpr typename FixedArray<T, N, A>::size_type
FixedArray<T, N, A>::inline_elements;
+#endif
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
index 71f49b0bbe..e3d24a077e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
#include <utility>
#include "y_absl/algorithm/container.h"
+#include "y_absl/base/macros.h"
#include "y_absl/container/internal/container_memory.h"
#include "y_absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "y_absl/container/internal/raw_hash_map.h" // IWYU pragma: export
@@ -75,6 +76,10 @@ struct FlatHashMapPolicy;
// y_absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `y_absl::flat_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `y_absl::Hash` values may
+// be randomized across dynamically loaded libraries.
+//
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
@@ -356,8 +361,8 @@ class flat_hash_map : public y_absl::container_internal::raw_hash_map<
// `flat_hash_map`.
//
// iterator try_emplace(const_iterator hint,
- // const init_type& k, Args&&... args):
- // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
@@ -541,10 +546,12 @@ class flat_hash_map : public y_absl::container_internal::raw_hash_map<
// erase_if(flat_hash_map<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename K, typename V, typename H, typename E, typename A,
typename Predicate>
-void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
+ flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
namespace container_internal {
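A usage sketch of the changed erase_if signature (map contents and predicate are hypothetical):

#include <cstddef>
#include <utility>

#include "y_absl/container/flat_hash_map.h"

// erase_if now reports how many elements were removed.
std::size_t DropNegativeValues(y_absl::flat_hash_map<int, int>& m) {
  return y_absl::erase_if(
      m, [](const std::pair<const int, int>& kv) { return kv.second < 0; });
}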
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
index b91b1fb11d..fd5d07019a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
@@ -36,7 +36,6 @@
#define Y_ABSL_CONTAINER_INLINED_VECTOR_H_
#include <algorithm>
-#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
@@ -152,7 +151,7 @@ class InlinedVector {
const allocator_type& allocator = allocator_type())
: storage_(allocator) {
storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
- std::distance(first, last));
+ static_cast<size_t>(std::distance(first, last)));
}
// Creates an inlined vector with elements constructed from the provided input
@@ -233,8 +232,8 @@ class InlinedVector {
// specified allocator is also `noexcept`.
InlinedVector(
InlinedVector&& other,
- const allocator_type& allocator)
- noexcept(y_absl::allocator_is_nothrow<allocator_type>::value)
+ const allocator_type&
+ allocator) noexcept(y_absl::allocator_is_nothrow<allocator_type>::value)
: storage_(allocator) {
if (IsMemcpyOk<A>::value) {
storage_.MemcpyFrom(other.storage_);
@@ -486,8 +485,8 @@ class InlinedVector {
InlinedVector& operator=(InlinedVector&& other) {
if (Y_ABSL_PREDICT_TRUE(this != std::addressof(other))) {
if (IsMemcpyOk<A>::value || other.storage_.GetIsAllocated()) {
- inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(),
- data(), size());
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
storage_.MemcpyFrom(other.storage_);
@@ -523,7 +522,7 @@ class InlinedVector {
EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
void assign(ForwardIterator first, ForwardIterator last) {
storage_.Assign(IteratorValueAdapter<A, ForwardIterator>(first),
- std::distance(first, last));
+ static_cast<size_t>(std::distance(first, last)));
}
// Overload of `InlinedVector::assign(...)` to replace the contents of the
@@ -586,8 +585,20 @@ class InlinedVector {
if (Y_ABSL_PREDICT_TRUE(n != 0)) {
value_type dealias = v;
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+ // It appears that GCC thinks that since `pos` is a const pointer and may
+ // point to uninitialized memory at this point, a warning should be
+ // issued. But `pos` is actually only used to compute an array index to
+ // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return storage_.Insert(pos, CopyValueAdapter<A>(std::addressof(dealias)),
n);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
} else {
return const_cast<iterator>(pos);
}
@@ -721,8 +732,8 @@ class InlinedVector {
// Destroys all elements in the inlined vector, setting the size to `0` and
// deallocating any held memory.
void clear() noexcept {
- inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(), data(),
- size());
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
storage_.SetInlinedSize(0);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h
index 6c7369e67b..c32dbd8179 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common.h
@@ -84,10 +84,11 @@ class node_handle_base {
PolicyTraits::transfer(alloc(), slot(), s);
}
- struct move_tag_t {};
- node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ struct construct_tag_t {};
+ template <typename... Args>
+ node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args)
: alloc_(a) {
- PolicyTraits::construct(alloc(), slot(), s);
+ PolicyTraits::construct(alloc(), slot(), std::forward<Args>(args)...);
}
void destroy() {
@@ -186,8 +187,8 @@ struct CommonAccess {
}
template <typename T, typename... Args>
- static T Move(Args&&... args) {
- return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ static T Construct(Args&&... args) {
+ return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
}
};
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
index 90d35184ab..d40cf7531c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
@@ -174,7 +174,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
//
// 2. auto a = PairArgs(args...);
// std::pair<F, S> p(std::piecewise_construct,
-// std::move(p.first), std::move(p.second));
+// std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
@@ -402,6 +402,15 @@ struct map_slot_policy {
}
}
+ // Construct this slot by copying from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot,
+ const slot_type* other) {
+ emplace(slot);
+ y_absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ other->value);
+ }
+
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
@@ -424,33 +433,6 @@ struct map_slot_policy {
}
destroy(alloc, old_slot);
}
-
- template <class Allocator>
- static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
- if (kMutableKeys::value) {
- using std::swap;
- swap(a->mutable_value, b->mutable_value);
- } else {
- value_type tmp = std::move(a->value);
- y_absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
- y_absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
- std::move(b->value));
- y_absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
- y_absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
- std::move(tmp));
- }
- }
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
- if (kMutableKeys::value) {
- dest->mutable_value = std::move(src->mutable_value);
- } else {
- y_absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
- y_absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
- std::move(src->value));
- }
- }
};
} // namespace container_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
index 51235fd9be..36816872fd 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
@@ -21,33 +21,43 @@
#include <limits>
#include "y_absl/base/attributes.h"
-#include "y_absl/container/internal/have_sse.h"
+#include "y_absl/base/config.h"
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/memory/memory.h"
#include "y_absl/profiling/internal/exponential_biased.h"
#include "y_absl/profiling/internal/sample_recorder.h"
#include "y_absl/synchronization/mutex.h"
+#include "y_absl/utility/utility.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace container_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int HashtablezInfo::kMaxStackDepth;
+#endif
namespace {
Y_ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
};
Y_ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
#if defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
Y_ABSL_PER_THREAD_TLS_KEYWORD y_absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
+void TriggerHashtablezConfigListener() {
+ auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+ if (listener != nullptr) listener();
+}
+
} // namespace
#if defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-Y_ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+Y_ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif // defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
HashtablezSampler& GlobalHashtablezSampler() {
@@ -55,13 +65,11 @@ HashtablezSampler& GlobalHashtablezSampler() {
return *sampler;
}
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+ size_t inline_element_size_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -74,11 +82,13 @@ void HashtablezInfo::PrepareForSampling() {
max_reserve.store(0, std::memory_order_relaxed);
create_time = y_absl::Now();
+ weight = stride;
// The inliner makes hardcoded skip_count difficult (especially when combined
// with LTO). We use the ability to exclude stacks by regex when encoding
// instead.
depth = y_absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
+ inline_element_size = inline_element_size_value;
}
static bool ShouldForceSampling() {
@@ -101,23 +111,32 @@ static bool ShouldForceSampling() {
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size) {
if (Y_ABSL_PREDICT_FALSE(ShouldForceSampling())) {
- *next_sample = 1;
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
+ next_sample.next_sample = 1;
+ const int64_t old_stride = exchange(next_sample.sample_stride, 1);
+ HashtablezInfo* result =
+ GlobalHashtablezSampler().Register(old_stride, inline_element_size);
return result;
}
#if !defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- *next_sample = std::numeric_limits<int64_t>::max();
+ next_sample = {
+ std::numeric_limits<int64_t>::max(),
+ std::numeric_limits<int64_t>::max(),
+ };
return nullptr;
#else
- bool first = *next_sample < 0;
- *next_sample = g_exponential_biased_generator.GetStride(
+ bool first = next_sample.next_sample < 0;
+
+ const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+ next_sample.next_sample = next_stride;
+ const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
// Small values of interval are equivalent to just sampling next time.
- Y_ABSL_ASSERT(*next_sample >= 1);
+ Y_ABSL_ASSERT(next_stride >= 1);
// g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
// low enough that we will start sampling in a reasonable time, so we just use
@@ -127,13 +146,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
// We will only be negative on our first count, so we should just retry in
// that case.
if (first) {
- if (Y_ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ if (Y_ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
return SampleSlow(next_sample, inline_element_size);
}
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
- return result;
+ return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
#endif
}
@@ -146,7 +163,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
// SwissTables probe in groups of 16, so scale this to count items probes and
// not offset from desired.
size_t probe_length = distance_from_desired;
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
@@ -163,11 +180,33 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->size.fetch_add(1, std::memory_order_relaxed);
}
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+ g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+ return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
void SetHashtablezEnabled(bool enabled) {
+ SetHashtablezEnabledInternal(enabled);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
g_hashtablez_enabled.store(enabled, std::memory_order_release);
}
+int32_t GetHashtablezSampleParameter() {
+ return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
void SetHashtablezSampleParameter(int32_t rate) {
+ SetHashtablezSampleParameterInternal(rate);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
if (rate > 0) {
g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
} else {
@@ -176,7 +215,16 @@ void SetHashtablezSampleParameter(int32_t rate) {
}
}
+int32_t GetHashtablezMaxSamples() {
+ return GlobalHashtablezSampler().GetMaxSamples();
+}
+
void SetHashtablezMaxSamples(int32_t max) {
+ SetHashtablezMaxSamplesInternal(max);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(int32_t max) {
if (max > 0) {
GlobalHashtablezSampler().SetMaxSamples(max);
} else {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h
index 3f600deea0..fefa2bb9f0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.h
@@ -44,9 +44,9 @@
#include <memory>
#include <vector>
+#include "y_absl/base/config.h"
#include "y_absl/base/internal/per_thread_tls.h"
#include "y_absl/base/optimization.h"
-#include "y_absl/container/internal/have_sse.h"
#include "y_absl/profiling/internal/sample_recorder.h"
#include "y_absl/synchronization/mutex.h"
#include "y_absl/utility/utility.h"
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
- void PrepareForSampling() Y_ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+ void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+ Y_ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
@@ -84,18 +85,18 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
- // These are guarded by init_mu, but that is not externalized to clients, who
- // can only read them during `HashtablezSampler::Iterate` which will hold the
- // lock.
+ // These are guarded by init_mu, but that is not externalized to clients,
+ // which can read them only during `SampleRecorder::Iterate` which will hold
+ // the lock.
static constexpr int kMaxStackDepth = 64;
y_absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
- size_t inline_element_size;
+ size_t inline_element_size; // How big is the slot?
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
@@ -144,7 +145,15 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+struct SamplingState {
+ int64_t next_sample;
+ // When we make a sampling decision, we record that distance so we can weight
+ // each sample.
+ int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
#if defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -234,7 +243,7 @@ class HashtablezInfoHandle {
#endif // defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern Y_ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern Y_ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistation
@@ -242,11 +251,11 @@ extern Y_ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
inline HashtablezInfoHandle Sample(
size_t inline_element_size Y_ABSL_ATTRIBUTE_UNUSED) {
#if defined(Y_ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- if (Y_ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ if (Y_ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
return HashtablezInfoHandle(
- SampleSlow(&global_next_sample, inline_element_size));
+ SampleSlow(global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // !Y_ABSL_PER_THREAD_TLS
@@ -258,14 +267,23 @@ using HashtablezSampler =
// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();
+using HashtablezConfigListener = void (*)();
+void SetHashtablezConfigListener(HashtablezConfigListener l);
+
// Enables or disables sampling for Swiss tables.
+bool IsHashtablezEnabled();
void SetHashtablezEnabled(bool enabled);
+void SetHashtablezEnabledInternal(bool enabled);
// Sets the rate at which Swiss tables will be sampled.
+int32_t GetHashtablezSampleParameter();
void SetHashtablezSampleParameter(int32_t rate);
+void SetHashtablezSampleParameterInternal(int32_t rate);
// Sets a soft max for the number of samples that will be kept.
+int32_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(int32_t max);
+void SetHashtablezMaxSamplesInternal(int32_t max);
// Configuration override.
// This allows process-wide sampling without depending on order of
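A sketch of the new getter/listener surface (container_internal API; the listener body and the wiring function are hypothetical):

#include <cstdint>

#include "y_absl/container/internal/hashtablez_sampler.h"

namespace ci = y_absl::container_internal;

// Hypothetical listener: re-read the sampling config whenever a setter runs.
void OnHashtablezConfigChanged() {
  bool enabled = ci::IsHashtablezEnabled();
  int32_t rate = ci::GetHashtablezSampleParameter();
  int32_t max_samples = ci::GetHashtablezMaxSamples();
  (void)enabled; (void)rate; (void)max_samples;
}

void EnableSwissTableSampling() {
  ci::SetHashtablezConfigListener(&OnHashtablezConfigChanged);
  ci::SetHashtablezEnabled(true);             // non-Internal setters notify the listener
  ci::SetHashtablezSampleParameter(1 << 10);  // mean sampling stride
  ci::SetHashtablezMaxSamples(1 << 20);
}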
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h
deleted file mode 100644
index d89f632a45..0000000000
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/have_sse.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Shared config probing for SSE instructions used in Swiss tables.
-#ifndef Y_ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#define Y_ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-
-#ifndef Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#if defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
-#else
-#define Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
-#endif
-#endif
-
-#ifndef Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#ifdef __SSSE3__
-#define Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
-#else
-#define Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
-#endif
-#endif
-
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
- !Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#endif // Y_ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
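For reference, the call sites touched by this patch now key off the SSE macros from base/config.h instead of this deleted probe header. A minimal sketch of the replacement pattern (the GROUP_WIDTH name is illustrative only; the <emmintrin.h> include mirrors what have_sse.h used to do):

#include "y_absl/base/config.h"

#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>  // SSE2 group probes operate on 16 slots
#define GROUP_WIDTH 16
#else
#define GROUP_WIDTH 8   // portable fallback groups
#endif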
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
index 9c8b3858af..047dd99882 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
@@ -40,7 +40,6 @@ namespace inlined_vector_internal {
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
template <typename A>
@@ -94,16 +93,30 @@ struct TypeIdentity {
template <typename T>
using NoTypeDeduction = typename TypeIdentity<T>::type;
+template <typename A, bool IsTriviallyDestructible =
+ y_absl::is_trivially_destructible<ValueType<A>>::value>
+struct DestroyAdapter;
+
template <typename A>
-void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
- SizeType<A> destroy_size) {
- if (destroy_first != nullptr) {
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ false> {
+ static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
for (SizeType<A> i = destroy_size; i != 0;) {
--i;
AllocatorTraits<A>::destroy(allocator, destroy_first + i);
}
}
-}
+};
+
+template <typename A>
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ true> {
+ static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
+ static_cast<void>(allocator);
+ static_cast<void>(destroy_first);
+ static_cast<void>(destroy_size);
+ }
+};
template <typename A>
struct Allocation {
@@ -133,7 +146,7 @@ void ConstructElements(NoTypeDeduction<A>& allocator,
for (SizeType<A> i = 0; i < construct_size; ++i) {
Y_ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
Y_ABSL_INTERNAL_CATCH_ANY {
- DestroyElements<A>(allocator, construct_first, i);
+ DestroyAdapter<A>::DestroyElements(allocator, construct_first, i);
Y_ABSL_INTERNAL_RETHROW;
}
}
@@ -253,7 +266,7 @@ class ConstructionTransaction {
~ConstructionTransaction() {
if (DidConstruct()) {
- DestroyElements<A>(GetAllocator(), GetData(), GetSize());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), GetData(), GetSize());
}
}
@@ -297,10 +310,10 @@ class Storage {
// Storage Constructors and Destructor
// ---------------------------------------------------------------------------
- Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
+ Storage() : metadata_(A(), /* size and is_allocated */ 0u) {}
explicit Storage(const A& allocator)
- : metadata_(allocator, /* size and is_allocated */ 0) {}
+ : metadata_(allocator, /* size and is_allocated */ 0u) {}
~Storage() {
if (GetSizeAndIsAllocated() == 0) {
@@ -416,7 +429,7 @@ class Storage {
}
void SubtractSize(SizeType<A> count) {
- assert(count <= GetSize());
+ Y_ABSL_HARDENING_ASSERT(count <= GetSize());
GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
@@ -427,7 +440,8 @@ class Storage {
}
void MemcpyFrom(const Storage& other_storage) {
- assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
+ Y_ABSL_HARDENING_ASSERT(IsMemcpyOk<A>::value ||
+ other_storage.GetIsAllocated());
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
@@ -469,14 +483,14 @@ class Storage {
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
- DestroyElements<A>(GetAllocator(), data, GetSize());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), data, GetSize());
DeallocateIfAllocated();
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
const SizeType<A> n = other.GetSize();
- assert(n > 0); // Empty sources handled handled in caller.
+ Y_ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled in caller.
ConstPointer<A> src;
Pointer<A> dst;
if (!other.GetIsAllocated()) {
@@ -508,8 +522,8 @@ template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
// Only callable from constructors!
- assert(!GetIsAllocated());
- assert(GetSize() == 0);
+ Y_ABSL_HARDENING_ASSERT(!GetIsAllocated());
+ Y_ABSL_HARDENING_ASSERT(GetSize() == 0);
Pointer<A> construct_data;
if (new_size > GetInlinedCapacity()) {
@@ -566,7 +580,8 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
construct_loop.size());
- DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), destroy_loop.data(),
+ destroy_loop.size());
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
@@ -587,7 +602,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
A& alloc = GetAllocator();
if (new_size <= size) {
// Destroy extra old elements.
- DestroyElements<A>(alloc, base + new_size, size - new_size);
+ DestroyAdapter<A>::DestroyElements(alloc, base + new_size, size - new_size);
} else if (new_size <= storage_view.capacity) {
// Construct new elements in place.
ConstructElements<A>(alloc, base + size, values, new_size - size);
@@ -595,7 +610,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
// Steps:
// a. Allocate new backing store.
// b. Construct new elements in new backing store.
- // c. Move existing elements from old backing store to now.
+ // c. Move existing elements from old backing store to new backing store.
// d. Destroy all elements in old backing store.
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
@@ -611,7 +626,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
(MoveIterator<A>(base)));
ConstructElements<A>(alloc, new_data, move_values, size);
- DestroyElements<A>(alloc, base, size);
+ DestroyAdapter<A>::DestroyElements(alloc, base, size);
std::move(construction_tx).Commit();
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
@@ -650,7 +665,8 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
move_values, storage_view.size - insert_index);
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
std::move(construction_tx).Commit();
std::move(move_construction_tx).Commit();
@@ -753,7 +769,8 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
Y_ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
@@ -778,9 +795,9 @@ auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
AssignElements<A>(storage_view.data + erase_index, move_values,
storage_view.size - erase_end_index);
- DestroyElements<A>(GetAllocator(),
- storage_view.data + (storage_view.size - erase_size),
- erase_size);
+ DestroyAdapter<A>::DestroyElements(
+ GetAllocator(), storage_view.data + (storage_view.size - erase_size),
+ erase_size);
SubtractSize(erase_size);
return Iterator<A>(storage_view.data + erase_index);
@@ -804,7 +821,8 @@ auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
ConstructElements<A>(GetAllocator(), new_data, move_values,
storage_view.size);
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
@@ -814,7 +832,7 @@ auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::ShrinkToFit() -> void {
// May only be called on allocated instances!
- assert(GetIsAllocated());
+ Y_ABSL_HARDENING_ASSERT(GetIsAllocated());
StorageView<A> storage_view{GetAllocatedData(), GetSize(),
GetAllocatedCapacity()};
@@ -847,7 +865,8 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
Y_ABSL_INTERNAL_RETHROW;
}
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
storage_view.capacity);
@@ -862,7 +881,7 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
using std::swap;
- assert(this != other_storage_ptr);
+ Y_ABSL_HARDENING_ASSERT(this != other_storage_ptr);
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
@@ -883,9 +902,10 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
move_values,
large_ptr->GetSize() - small_ptr->GetSize());
- DestroyElements<A>(large_ptr->GetAllocator(),
- large_ptr->GetInlinedData() + small_ptr->GetSize(),
- large_ptr->GetSize() - small_ptr->GetSize());
+ DestroyAdapter<A>::DestroyElements(
+ large_ptr->GetAllocator(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
@@ -904,23 +924,24 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
inlined_ptr->GetSize());
}
Y_ABSL_INTERNAL_CATCH_ANY {
- allocated_ptr->SetAllocation(
- Allocation<A>{allocated_storage_view.data, allocated_storage_view.capacity});
+ allocated_ptr->SetAllocation(Allocation<A>{
+ allocated_storage_view.data, allocated_storage_view.capacity});
Y_ABSL_INTERNAL_RETHROW;
}
- DestroyElements<A>(inlined_ptr->GetAllocator(),
- inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
+ DestroyAdapter<A>::DestroyElements(inlined_ptr->GetAllocator(),
+ inlined_ptr->GetInlinedData(),
+ inlined_ptr->GetSize());
- inlined_ptr->SetAllocation(
- Allocation<A>{allocated_storage_view.data, allocated_storage_view.capacity});
+ inlined_ptr->SetAllocation(Allocation<A>{allocated_storage_view.data,
+ allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
-// End ignore "array-bounds" and "maybe-uninitialized"
+// End ignore "array-bounds"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
index a3291e681e..d18535fcbb 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
@@ -23,13 +23,17 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace container_internal {
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
alignas(16) Y_ABSL_CONST_INIT Y_ABSL_DLL const ctrl_t kEmptyGroup[16] = {
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
+#endif
// Returns "random" seed.
inline size_t RandomSeed() {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
index d264d85eef..e24a2bb813 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
@@ -53,51 +53,121 @@
//
// IMPLEMENTATION DETAILS
//
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
+// # Table Layout
+//
+// A raw_hash_set's backing array consists of control bytes followed by slots
+// that may or may not contain objects.
+//
+// The layout of the backing array, for `capacity` slots, is thus, as a
+// pseudo-struct:
+//
+// struct BackingArray {
+// // Control bytes for the "real" slots.
+// ctrl_t ctrl[capacity];
+// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
+// // stop and serves no other purpose.
+// ctrl_t sentinel;
+// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
+// // that if a probe sequence picks a value near the end of `ctrl`,
+// // `Group` will have valid control bytes to look at.
+// ctrl_t clones[kWidth - 1];
+// // The actual slot data.
+// slot_type slots[capacity];
+// };
+//
+// The length of this array is computed by `AllocSize()` below.
+//
+// Control bytes (`ctrl_t`) are bytes (collected into groups of a
+// platform-specific size) that define the state of the corresponding slot in
+// the slot array. Group manipulation is tightly optimized to be as efficient
+// as possible: SSE and friends on x86, clever bit operations on other arches.
//
// Group 1 Group 2 Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+// Each control byte is either one of three special values (marking an empty
+// slot, a deleted slot, sometimes called a *tombstone*, or the end-of-table
+// marker used by iterators) or, if the slot is occupied, seven bits (H2) from
+// the hash of the value in the corresponding slot.
+//
+// Storing control bytes in a separate array also has beneficial cache effects,
+// since more logical slots will fit into a cache line.
+//
+// # Hashing
+//
+// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
+// `H1(hash(x))` is an index into `slots`, and essentially the starting point
+// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
+// objects that cannot possibly be the one we are looking for.
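For illustration, a minimal sketch of the split (the real `H1`/`H2` helpers appear further down in this header; `Hash` and `x` are placeholders, and the real `H1` additionally mixes in a per-table salt):

  size_t hash = Hash{}(x);
  size_t h1 = hash >> 7;     // probe starting point, taken from the upper bits
  uint8_t h2 = hash & 0x7F;  // 7-bit tag stored in an occupied control byte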
+//
+// # Table operations.
//
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
+// The key operations are `insert`, `find`, and `erase`.
//
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
+// Since `insert` and `erase` are implemented in terms of `find`, we describe
+// `find` first. To `find` a value `x`, we compute `hash(x)`. From
+// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
+// group of slots in some interesting order.
//
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N the number of groups,
-// the probing function for the i'th probe is:
+// We now walk through these indices. At each index, we select the entire group
+// starting with that index and extract potential candidates: occupied slots
+// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
+// group, we stop and return an error. Each candidate slot `y` is compared with
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
+// next probe index. Tombstones effectively behave like full slots that never
+// match the value we're looking for.
//
-// P(0) = H1 % N
+// The `H2` bits ensure when we compare a slot to an object with `==`, we are
+// likely to have actually found the object. That is, the chance is low that
+// `==` is called and returns `false`. Thus, when we search for an object, we
+// are unlikely to call `==` many times. This likelihood can be analyzed as
+// follows (assuming that H2 is a random enough hash function).
//
-// P(i) = (P(i - 1) + i) % N
+// Let's assume that there are `k` "wrong" objects that must be examined in a
+// probe sequence. For example, when doing a `find` on an object that is in the
+// table, `k` is the number of objects between the start of the probe sequence
+// and the final found object (not including the final found object). The
+// expected number of objects with an H2 match is then `k/128`. Measurements
+// and analysis indicate that even at high load factors, `k` is less than 32,
+// meaning that the number of "false positive" comparisons we must perform is
+// less than 1/8 per `find`.
+
+// `insert` is implemented in terms of `unchecked_insert`, which inserts a
+// value presumed to not be in the table (violating this requirement will cause
+// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
+// it, we construct a `probe_seq` once again, and use it to find the first
+// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
+// first such slot in the group and mark it as full with `x`'s H2.
//
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)`
+// perform a `find` to see if it's already present; if it is, we're done. If
+// it's not, we may decide the table is getting overcrowded (i.e. the load
+// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
+// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
+// each element of the table into the new array (we know that no insertion here
+// will insert an already-present value), and discard the old backing array. At
+// this point, we may `unchecked_insert` the value `x`.
//
-// The control state and slot array are stored contiguously in a shared heap
-// allocation. The layout of this allocation is: `capacity()` control bytes,
-// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
-// <possible padding>, `capacity()` slots. The sentinel control byte is used in
-// iteration so we know when we reach the end of the table. The cloned control
-// bytes at the end of the table are cloned from the beginning of the table so
-// groups that begin near the end of the table can see a full group. In cases in
-// which there are more than `capacity()` cloned control bytes, the extra bytes
-// are `kEmpty`, and these ensure that we always see at least one empty slot and
-// can stop an unsuccessful search.
+// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
+// presents a viable, initialized slot pointee to the caller.
+//
+// `erase` is implemented in terms of `erase_at`, which takes an index to a
+// slot. Given an offset, we simply create a tombstone and destroy its contents.
+// If we can prove that the slot would not appear in a probe sequence, we can
+// mark the slot as empty instead. We can prove this by observing that if a
+// group has any empty slots, it has never been full (assuming we never create
+// an empty slot in a group with no empties, which this heuristic guarantees we
+// never do) and find would stop at this group anyway (since it does not probe
+// beyond groups with empties).
+//
+// `erase` is `erase_at` composed with `find`: if we
+// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
+// slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
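A compressed sketch of the `find` loop described above (not the literal implementation, which lives in `raw_hash_set::find()` later in this header; `elements_equal` and `slots` stand in for the policy's equality check and slot array):

  auto seq = probe(ctrl, hash, capacity);
  while (true) {
    Group g{ctrl + seq.offset()};
    for (uint32_t i : g.Match(H2(hash))) {  // candidate slots with matching H2
      if (elements_equal(slots[seq.offset(i)], x)) return iterator_at(seq.offset(i));
    }
    if (g.MaskEmpty()) return end();        // an empty slot ends the probe
    seq.next();
  }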
#ifndef Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
@@ -113,7 +183,9 @@
#include <type_traits>
#include <utility>
+#include "y_absl/base/config.h"
#include "y_absl/base/internal/endian.h"
+#include "y_absl/base/internal/prefetch.h"
#include "y_absl/base/optimization.h"
#include "y_absl/base/port.h"
#include "y_absl/container/internal/common.h"
@@ -122,12 +194,27 @@
#include "y_absl/container/internal/hash_policy_traits.h"
#include "y_absl/container/internal/hashtable_debug_hooks.h"
#include "y_absl/container/internal/hashtablez_sampler.h"
-#include "y_absl/container/internal/have_sse.h"
#include "y_absl/memory/memory.h"
#include "y_absl/meta/type_traits.h"
#include "y_absl/numeric/bits.h"
#include "y_absl/utility/utility.h"
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef Y_ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -142,14 +229,40 @@ template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
+// The state for a probe sequence.
+//
+// Currently, the sequence is a triangular progression of the form
+//
+// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
+//
+// The use of `Width` ensures that each probe step does not overlap groups;
+// the sequence effectively outputs the addresses of *groups* (although not
+// necessarily aligned to any boundary). The `Group` machinery allows us
+// to check an entire group with minimal branching.
+//
+// Wrapping around at `mask + 1` is important, but not for the obvious reason.
+// As described above, the first few entries of the control byte array
+// are mirrored at the end of the array, which `Group` will find and use
+// for selecting candidates. However, when those candidates' slots are
+// actually inspected, there are no corresponding slots for the cloned bytes,
+// so we need to make sure we've treated those offsets as "wrapping around".
+//
+// It turns out that this probe sequence visits every group exactly once if the
+// number of groups is a power of two, since (i^2+i)/2 is a bijection in
+// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
template <size_t Width>
class probe_seq {
public:
+ // Creates a new probe sequence using `hash` as the initial value of the
+ // sequence and `mask` (usually the capacity of the table) as the mask to
+ // apply to each value in the progression.
probe_seq(size_t hash, size_t mask) {
assert(((mask + 1) & mask) == 0 && "not a mask");
mask_ = mask;
offset_ = hash & mask_;
}
+
+ // The offset within the table, i.e., the value `p(i)` above.
size_t offset() const { return offset_; }
size_t offset(size_t i) const { return (offset_ + i) & mask_; }
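For example (illustrative values only, with `Width == 16` and `mask == 63`), the first few offsets produced by the progression above are:

  probe_seq<16> seq(/*hash=*/27, /*mask=*/63);
  // seq.offset() == 27; after successive seq.next() calls the offsets are
  // 43, 11, 59, ... i.e. (27 + 16 * (i*i + i) / 2) mod 64.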
@@ -158,7 +271,7 @@ class probe_seq {
offset_ += index_;
offset_ &= mask_;
}
- // 0-based probe index. The i-th probe in the probe sequence.
+ // 0-based probe index, a multiple of `Width`.
size_t index() const { return index_; }
private:
@@ -182,9 +295,9 @@ struct IsDecomposable : std::false_type {};
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
- y_absl::void_t<decltype(
- Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
+ y_absl::void_t<decltype(Policy::apply(
+ RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
@@ -200,57 +313,84 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
template <typename T>
uint32_t TrailingZeros(T x) {
- Y_ABSL_INTERNAL_ASSUME(x != 0);
- return countr_zero(x);
+ Y_ABSL_ASSUME(x != 0);
+ return static_cast<uint32_t>(countr_zero(x));
}
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
-// this is a true bitmask. On non-SSE, platforms the arithematic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as
-// either 0x00 or 0x80.
+// An abstract bitmask, such as that emitted by a SIMD instruction.
//
-// For example:
-// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
-// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+// This mask provides operations for any number of real bits set in an abstract
+// bit. To add iteration on top of that, the implementation must guarantee no
+// more than one real bit is set in an abstract bit.
template <class T, int SignificantBits, int Shift = 0>
-class BitMask {
- static_assert(std::is_unsigned<T>::value, "");
- static_assert(Shift == 0 || Shift == 3, "");
-
+class NonIterableBitMask {
public:
- // These are useful for unit tests (gunit).
- using value_type = int;
- using iterator = BitMask;
- using const_iterator = BitMask;
+ explicit NonIterableBitMask(T mask) : mask_(mask) {}
- explicit BitMask(T mask) : mask_(mask) {}
- BitMask& operator++() {
- mask_ &= (mask_ - 1);
- return *this;
- }
- explicit operator bool() const { return mask_ != 0; }
- int operator*() const { return LowestBitSet(); }
+ explicit operator bool() const { return this->mask_ != 0; }
+
+ // Returns the index of the lowest *abstract* bit set in `self`.
uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+
+ // Returns the index of the highest *abstract* bit set in `self`.
uint32_t HighestBitSet() const {
return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
- BitMask begin() const { return *this; }
- BitMask end() const { return BitMask(0); }
-
+ // Return the number of trailing zero *abstract* bits.
uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+ // Return the number of leading zero *abstract* bits.
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return countl_zero(mask_ << extra_bits) >> Shift;
+ return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
}
+ T mask_;
+};
+
+// A mask that can be iterated over.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
+ using Base = NonIterableBitMask<T, SignificantBits, Shift>;
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ explicit BitMask(T mask) : Base(mask) {}
+ // BitMask is an iterator over the indices of its abstract bits.
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ BitMask& operator++() {
+ this->mask_ &= (this->mask_ - 1);
+ return *this;
+ }
+
+ uint32_t operator*() const { return Base::LowestBitSet(); }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
private:
friend bool operator==(const BitMask& a, const BitMask& b) {
return a.mask_ == b.mask_;
@@ -258,15 +398,27 @@ class BitMask {
friend bool operator!=(const BitMask& a, const BitMask& b) {
return a.mask_ != b.mask_;
}
-
- T mask_;
};
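A usage sketch of the `Shift == 3` form (the mask value is an arbitrary example of the byte-per-bit encoding produced by the portable and NEON `Group` implementations below):

  for (uint32_t i : BitMask<uint64_t, 8, 3>(0x0000008000800000ULL)) {
    // visits i == 2, then i == 4: the byte indices whose high bit is set.
  }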
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
-// below for details. We use an enum class so that when strict aliasing is
-// enabled, the compiler knows ctrl_t doesn't alias other types.
+// below for details.
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+// empty: 1 0 0 0 0 0 0 0
+// deleted: 1 1 1 1 1 1 1 0
+// full: 0 h h h h h h h // h represents the hash bits.
+// sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
@@ -294,15 +446,17 @@ static_assert(
static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
"ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
"shared by ctrl_t::kSentinel to make the scalar test for "
- "MatchEmptyOrDeleted() efficient");
+ "MaskEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
"ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
Y_ABSL_DLL extern const ctrl_t kEmptyGroup[16];
+
+// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
+ // Const must be cast away here; no uses of this function will actually write
+ // to it, because it is only used for empty tables.
return const_cast<ctrl_t*>(kEmptyGroup);
}
@@ -310,28 +464,61 @@ inline ctrl_t* EmptyGroup() {
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
-// Returns a hash seed.
+// Returns a per-table hash salt, which changes on resize. This gets mixed into
+// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
+inline size_t PerTableSalt(const ctrl_t* ctrl) {
// The low bits of the pointer have little or no entropy because of
// alignment. We shift the pointer to try to use higher entropy bits. A
// good number seems to be 12 bits, because that aligns with page size.
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
-
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ HashSeed(ctrl);
+ return (hash >> 7) ^ PerTableSalt(ctrl);
}
+
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
inline h2_t H2(size_t hash) { return hash & 0x7F; }
+// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
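A few illustrative checks showing how the bit patterns above reduce these helpers to single signed comparisons (the values are examples, not real table state):

  assert(IsFull(static_cast<ctrl_t>(0x42)));    // high bit clear: occupied, H2 == 0x42
  assert(IsEmptyOrDeleted(ctrl_t::kEmpty));     // -128 < kSentinel (-1)
  assert(IsEmptyOrDeleted(ctrl_t::kDeleted));   // -2 < kSentinel (-1)
  assert(!IsEmptyOrDeleted(ctrl_t::kSentinel));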
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
+// * _mm_and_si128: Ands two i128s together.
+// * _mm_or_si128: Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+// filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128: Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
+// argument if the corresponding lane of the second
+// argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+// bitmask consisting of those bits.
+// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
+// four bits of each i8 lane in the second argument as
+// indices.
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -360,30 +547,32 @@ struct GroupSse2Impl {
BitMask<uint32_t, kWidth> Match(h2_t hash) const {
auto match = _mm_set1_epi8(hash);
return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
}
// Returns a bitmask representing the positions of empty slots.
- BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+ NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
+#ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
// This only works because ctrl_t::kEmpty is -128.
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
- return Match(static_cast<h2_t>(ctrl_t::kEmpty));
+ auto match = _mm_set1_epi8(static_cast<h2_t>(ctrl_t::kEmpty));
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
- BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
+ return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
return TrailingZeros(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
@@ -391,7 +580,7 @@ struct GroupSse2Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -405,6 +594,63 @@ struct GroupSse2Impl {
};
#endif // Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#if defined(Y_ABSL_INTERNAL_HAVE_ARM_NEON) && defined(Y_ABSL_IS_LITTLE_ENDIAN)
+struct GroupAArch64Impl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupAArch64Impl(const ctrl_t* pos) {
+ ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
+ }
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ uint8x8_t dup = vdup_n_u8(hash);
+ auto mask = vceq_u8(ctrl, dup);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>(
+ vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(
+ vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+ // kDeleted. We lower all other bits and count number of trailing zeros.
+ // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
+ // so we should be fine.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return countr_zero((mask | ~(mask >> 7)) & bits) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = mask & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint8x8_t ctrl;
+};
+#endif // Y_ABSL_INTERNAL_HAVE_ARM_NEON && Y_ABSL_IS_LITTLE_ENDIAN
+
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -431,19 +677,23 @@ struct GroupPortableImpl {
return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+ msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+ msbs);
}
uint32_t CountLeadingEmptyOrDeleted() const {
- constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
- return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+ // kDeleted. We lower all other bits and count number of trailing zeros.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3;
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -457,32 +707,40 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
-#if Y_ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef Y_ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
+#elif defined(Y_ABSL_INTERNAL_HAVE_ARM_NEON) && defined(Y_ABSL_IS_LITTLE_ENDIAN)
+using Group = GroupAArch64Impl;
#else
using Group = GroupPortableImpl;
#endif
-// The number of cloned control bytes that we copy from the beginning to the
-// end of the control bytes array.
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+// Applies the following mapping to every byte in the control array:
+// * kDeleted -> kEmpty
+// * kEmpty -> kEmpty
+// * _ -> kDeleted
// PRECONDITION:
// IsValidCapacity(capacity)
// ctrl[capacity] == ctrl_t::kSentinel
// ctrl[i] != ctrl_t::kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-// DELETED -> EMPTY
-// EMPTY -> EMPTY
-// FULL -> DELETED
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
inline size_t NormalizeCapacity(size_t n) {
return n ? ~size_t{} >> countl_zero(n) : 1;
}
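For example (illustrative values; each result satisfies `IsValidCapacity()`):

  assert(NormalizeCapacity(0) == 1);
  assert(NormalizeCapacity(8) == 15);     // next 2^m - 1 at or above 8
  assert(NormalizeCapacity(100) == 127);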
@@ -495,8 +753,8 @@ inline size_t NormalizeCapacity(size_t n) {
// never need to probe (the whole table fits in one group) so we don't need a
// load factor less than 1.
-// Given `capacity` of the table, returns the size (i.e. number of full slots)
-// at which we should grow the capacity.
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -506,8 +764,12 @@ inline size_t CapacityToGrowth(size_t capacity) {
}
return capacity - capacity / 8;
}
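Concretely (illustrative values):

  assert(CapacityToGrowth(15) == 14);     // 15 - 15 / 8
  assert(CapacityToGrowth(127) == 112);   // 127 - 127 / 8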
-// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and requires NormalizeCapacity().
+
+// Given `growth`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// This might not be a valid capacity and `NormalizeCapacity()` should be
+// called on the result.
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -533,16 +795,15 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
return 0;
}
-inline void AssertIsFull(ctrl_t* ctrl) {
- Y_ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
-}
+#define Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \
+ Y_ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg)
inline void AssertIsValid(ctrl_t* ctrl) {
- Y_ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
+ Y_ABSL_HARDENING_ASSERT(
+ (ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, the table might have rehashed, or this may "
+ "be an end() iterator.");
}
struct FindInfo {
@@ -550,44 +811,40 @@ struct FindInfo {
size_t probe_length;
};
-// The representation of the object has two modes:
-// - small: For capacities < kWidth-1
-// - large: For the rest.
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
//
-// Differences:
-// - In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
//
-// - In small mode only the first `capacity()` control bytes after the
-// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// find_first_non_full(), where we never try ShouldInsertBackwards() for
-// small tables.
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// `find_first_non_full()`, where we never try
+// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+// Begins a probing operation on `ctrl`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
-// Probes the raw_hash_set with the probe sequence for hash and returns the
-// pointer to the first empty or deleted slot.
-// NOTE: this function must work with tables having both ctrl_t::kEmpty and
-// ctrl_t::kDeleted in one group. Such tables appears during
-// drop_deletes_without_resize.
+// Probes an array of control bits using a probe sequence derived from `hash`,
+// and returns the offset corresponding to the first deleted or empty slot.
//
-// This function is very useful when insertions happen and:
-// - the input is already a set
-// - there are enough slots
-// - the element with the hash is not in the table
+// Behavior when the entire table is full is undefined.
+//
+// NOTE: this function must work with tables having both empty and deleted
+// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
auto seq = probe(ctrl, hash, capacity);
while (true) {
Group g{ctrl + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
+ auto mask = g.MaskEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
@@ -610,7 +867,8 @@ inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
// corresponding translation unit.
extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
+// Sets all control bytes to `kEmpty` and the byte at index `capacity` to
+// `kSentinel`, marking the entire array as empty.
inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
size_t slot_size) {
std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
@@ -619,8 +877,10 @@ inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
}
-// Sets the control byte, and if `i < NumClonedBytes()`, set the cloned byte
-// at the end too.
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
const void* slot, size_t slot_size) {
assert(i < capacity);
@@ -636,25 +896,28 @@ inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
const void* slot, size_t slot_size) {
SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
}
-// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
-// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
-// SlotOffset returns the offset of the slots into the allocated block.
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
inline size_t SlotOffset(size_t capacity, size_t slot_align) {
assert(IsValidCapacity(capacity));
const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
}
-// Returns the size of the allocated block. See also above comment.
+// Given the capacity of a table, computes the total size of the backing
+// array.
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
return SlotOffset(capacity, slot_align) + capacity * slot_size;
}
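As a worked example (illustrative values, assuming `Group::kWidth == 16`, i.e. `NumClonedBytes() == 15`):

  // capacity = 15, slot_size = 8, slot_align = 8:
  // 15 + 1 (sentinel) + 15 (clones) = 31 control bytes, rounded up to 32.
  assert(SlotOffset(15, 8) == 32);
  assert(AllocSize(15, 8, 8) == 152);     // 32 + 15 * 8 bytes of slots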
+// A SwissTable.
+//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -769,16 +1032,22 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- AssertIsFull(ctrl_);
+ Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator*() called on invalid iterator.");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
- pointer operator->() const { return &operator*(); }
+ pointer operator->() const {
+ Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator-> called on invalid iterator.");
+ return &operator*();
+ }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- AssertIsFull(ctrl_);
+ Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator++ called on invalid iterator.");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -804,9 +1073,13 @@ class raw_hash_set {
iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
// This assumption helps the compiler know that any non-end iterator is
// not equal to any end iterator.
- Y_ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+ Y_ABSL_ASSUME(ctrl != nullptr);
}
+ // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_` until
+ // they reach one.
+ //
+ // If a sentinel is reached, we null both of them out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@@ -1103,8 +1376,7 @@ class raw_hash_set {
// m.insert(std::make_pair("abc", 42));
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
// bug.
- template <class T, RequiresInsertable<T> = 0,
- class T2 = T,
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
std::pair<iterator, bool> insert(T&& value) {
@@ -1324,7 +1596,8 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- AssertIsFull(it.ctrl_);
+ Y_ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_,
+ "erase() called on invalid iterator.");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1358,7 +1631,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- AssertIsFull(position.inner_.ctrl_);
+ Y_ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
+ "extract() called on invalid iterator.");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1445,12 +1719,13 @@ class raw_hash_set {
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
(void)key;
-#if defined(__GNUC__)
+ // Avoid probing if we won't be able to prefetch the addresses received.
+#ifdef Y_ABSL_INTERNAL_HAVE_PREFETCH
prefetch_heap_block();
auto seq = probe(ctrl_, hash_ref()(key), capacity_);
- __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
- __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
-#endif // __GNUC__
+ base_internal::PrefetchT0(ctrl_ + seq.offset());
+ base_internal::PrefetchT0(slots_ + seq.offset());
+#endif // Y_ABSL_INTERNAL_HAVE_PREFETCH
}
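A hypothetical usage sketch of this extension (`set`, `key`, and `do_other_work` are placeholders; the point is to overlap the memory latency with unrelated computation):

  set.prefetch(key);
  do_other_work();
  auto it = set.find(key);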
// The API of find() has two extensions.
@@ -1465,13 +1740,13 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (Y_ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
- if (Y_ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ if (Y_ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
assert(seq.index() <= capacity_ && "full table!");
}
@@ -1538,6 +1813,14 @@ class raw_hash_set {
return !(a == b);
}
+ template <typename H>
+ friend typename std::enable_if<H::template is_hashable<value_type>::value,
+ H>::type
+ AbslHashValue(H h, const raw_hash_set& s) {
+ return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
+ s.size());
+ }
+
friend void swap(raw_hash_set& a,
raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
a.swap(b);
@@ -1603,17 +1886,17 @@ class raw_hash_set {
slot_type&& slot;
};
- // "erases" the object from the container, except that it doesn't actually
- // destroy the object. It only updates all the metadata of the class.
- // This can be used in conjunction with Policy::transfer to move the object to
- // another place.
+ // Erases, but does not destroy, the value pointed to by `it`.
+ //
+ // This merely updates the pertinent control byte. This can be used in
+ // conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
--size_;
- const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index = static_cast<size_t>(it.inner_.ctrl_ - ctrl_);
const size_t index_before = (index - Group::kWidth) & capacity_;
- const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
- const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+ const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -1629,6 +1912,11 @@ class raw_hash_set {
infoz().RecordErase();
}
+ // Allocates a backing array for `self` and initializes its control bytes.
+ // This reads `capacity_` and updates all other fields based on the result of
+ // the allocation.
+ //
+ // This does not free the currently held array; `capacity_` must be nonzero.
void initialize_slots() {
assert(capacity_);
// Folks with custom allocators often make unwarranted assumptions about the
@@ -1657,6 +1945,10 @@ class raw_hash_set {
infoz().RecordStorageChanged(size_, capacity_);
}
+ // Destroys all slots in the backing array, frees the backing array, and
+ // clears all top-level book-keeping data.
+ //
+ // This essentially implements `map = raw_hash_set();`.
void destroy_slots() {
if (!capacity_) return;
for (size_t i = 0; i != capacity_; ++i) {
@@ -1707,6 +1999,9 @@ class raw_hash_set {
infoz().RecordRehash(total_probe_length);
}
+ // Prunes control bytes to remove as many tombstones as possible.
+ //
+ // See the comment on `rehash_and_grow_if_necessary()`.
void drop_deletes_without_resize() Y_ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
assert(!is_small(capacity_));
@@ -1773,6 +2068,11 @@ class raw_hash_set {
infoz().RecordRehash(total_probe_length);
}
+ // Called whenever the table *might* need to conditionally grow.
+ //
+ // This function is an optimization opportunity to perform a rehash even when
+ // growth is unnecessary, because vacating tombstones is beneficial for
+ // performance in the long-run.
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(1);
@@ -1832,12 +2132,12 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (Y_ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
elem))
return true;
}
- if (Y_ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ if (Y_ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
seq.next();
assert(seq.index() <= capacity_ && "full table!");
}
@@ -1857,6 +2157,9 @@ class raw_hash_set {
}
protected:
+ // Attempts to find `key` in the table; if it isn't found, returns a slot that
+ // the value can be inserted into, with the control byte already set to
+ // `key`'s H2.
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
@@ -1864,19 +2167,23 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (Y_ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return {seq.offset(i), false};
}
- if (Y_ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ if (Y_ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
seq.next();
assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
+ // Given the hash of a value not currently in the table, finds the next
+ // viable slot index to insert it at.
+ //
+ // REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) Y_ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(ctrl_, hash, capacity_);
if (Y_ABSL_PREDICT_FALSE(growth_left() == 0 &&
@@ -1920,15 +2227,23 @@ class raw_hash_set {
growth_left() = CapacityToGrowth(capacity()) - size_;
}
+ // The number of slots we can still fill without needing to rehash.
+ //
+ // This is stored separately due to tombstones: we do not include tombstones
+ // in the growth capacity, because we'd like to rehash when the table is
+ // otherwise filled with tombstones: otherwise, probe sequences might get
+ // unacceptably long without triggering a rehash. Callers can also force a
+ // rehash via the standard `rehash(0)`, which will recompute this value as a
+ // side-effect.
+ //
+ // See `CapacityToGrowth()`.
size_t& growth_left() { return settings_.template get<0>(); }
+ // Prefetch the heap-allocated memory region to resolve potential TLB misses.
+ // This is intended to overlap with execution of calculating the hash for a
+ // key.
void prefetch_heap_block() const {
- // Prefetch the heap-allocated memory region to resolve potential TLB
- // misses. This is intended to overlap with execution of calculating the
- // hash for a key.
-#if defined(__GNUC__)
- __builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
-#endif // __GNUC__
+ base_internal::PrefetchT2(ctrl_);
}
HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
@@ -1945,20 +2260,33 @@ class raw_hash_set {
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
+
+ // The control bytes (and, also, a pointer to the base of the backing array).
+ //
+ // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ ctrl_t* ctrl_ = EmptyGroup();
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `ctrl_`. May be null for empty tables.
+ slot_type* slots_ = nullptr;
+
+ // The number of filled slots.
+ size_t size_ = 0;
+
+ // The total number of available slots.
+ size_t capacity_ = 0;
y_absl::container_internal::CompressedTuple<size_t /* growth_left */,
HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{},
allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+typename raw_hash_set<P, H, E, A>::size_type EraseIf(
+ Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+ const auto initial_size = c->size();
for (auto it = c->begin(), last = c->end(); it != last;) {
if (pred(*it)) {
c->erase(it++);
@@ -1966,6 +2294,7 @@ void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
++it;
}
}
+ return initial_size - c->size();
}
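With this change the number of removed elements is reported back to the caller; a usage sketch through the public `y_absl::erase_if` wrapper (assuming it forwards to `EraseIf`, as the container headers in this update do):

  y_absl::flat_hash_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
  auto removed =
      y_absl::erase_if(m, [](const auto& kv) { return kv.first % 2 == 0; });
  // removed == 2; m now holds the keys 1 and 3.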
namespace hashtable_debug_internal {
@@ -1981,7 +2310,7 @@ struct HashtableDebugAccess<Set, y_absl::void_t<typename Set::raw_hash_set>> {
auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
- for (int i : g.Match(container_internal::H2(hash))) {
+ for (uint32_t i : g.Match(container_internal::H2(hash))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
@@ -1989,7 +2318,7 @@ struct HashtableDebugAccess<Set, y_absl::void_t<typename Set::raw_hash_set>> {
return num_probes;
++num_probes;
}
- if (g.MatchEmpty()) return num_probes;
+ if (g.MaskEmpty()) return num_probes;
seq.next();
++num_probes;
}
@@ -2031,4 +2360,6 @@ struct HashtableDebugAccess<Set, y_absl::void_t<typename Set::raw_hash_set>> {
Y_ABSL_NAMESPACE_END
} // namespace y_absl
+#undef Y_ABSL_INTERNAL_ASSERT_IS_FULL
+
#endif // Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/CMakeLists.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/CMakeLists.txt
index 941146d1d1..a862043d46 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/CMakeLists.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/CMakeLists.txt
@@ -26,7 +26,6 @@ target_sources(abseil-cpp-tstring-y_absl-debugging PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
- ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/stacktrace.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc
)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
index a79d902623..9e35a345d9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
@@ -42,7 +42,6 @@
#include <ctime>
#include "y_absl/base/attributes.h"
-#include "y_absl/base/internal/errno_saver.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/internal/sysinfo.h"
#include "y_absl/debugging/internal/examine_stack.h"
@@ -52,7 +51,7 @@
#define Y_ABSL_HAVE_SIGACTION
// Apple WatchOS and TVOS don't allow sigaltstack
#if !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \
- !(defined(TARGET_OS_TV) && TARGET_OS_TV)
+ !(defined(TARGET_OS_TV) && TARGET_OS_TV) && !defined(__QNX__)
#define Y_ABSL_HAVE_SIGALTSTACK
#endif
#endif
@@ -217,8 +216,7 @@ static void InstallOneFailureHandler(FailureSignalData* data,
#endif
static void WriteToStderr(const char* data) {
- y_absl::base_internal::ErrnoSaver errno_saver;
- y_absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
+ y_absl::raw_logging_internal::AsyncSignalSafeWriteToStderr(data, strlen(data));
}
static void WriteSignalMessage(int signo, int cpu,
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc
index 669d81f878..ccfed8bc42 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/address_is_readable.cc
@@ -30,16 +30,12 @@ bool AddressIsReadable(const void* /* addr */) { return true; }
Y_ABSL_NAMESPACE_END
} // namespace y_absl
-#else
+#else // __linux__ && !__ANDROID__
-#include <fcntl.h>
-#include <sys/syscall.h>
+#include <stdint.h>
+#include <syscall.h>
#include <unistd.h>
-#include <atomic>
-#include <cerrno>
-#include <cstdint>
-
#include "y_absl/base/internal/errno_saver.h"
#include "y_absl/base/internal/raw_logging.h"
@@ -47,93 +43,54 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
-// Pack a pid and two file descriptors into a 64-bit word,
-// using 16, 24, and 24 bits for each respectively.
-static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
- Y_ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
- "fd out of range");
- return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
-}
-
-// Unpack x into a pid and two file descriptors, where x was created with
-// Pack().
-static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
- *pid = x >> 48;
- *read_fd = (x >> 24) & 0xffffff;
- *write_fd = x & 0xffffff;
-}
+// NOTE: be extra careful about adding any interposable function calls here
+// (such as open(), read(), etc.). These symbols may be interposed and will get
+// invoked in contexts they don't expect.
+//
+// NOTE: any new system calls here may also require sandbox reconfiguration.
+//
+bool AddressIsReadable(const void *addr) {
+ // Align address on 8-byte boundary. On aarch64, checking last
+ // byte before inaccessible page returned unexpected EFAULT.
+ const uintptr_t u_addr = reinterpret_cast<uintptr_t>(addr) & ~7;
+ addr = reinterpret_cast<const void *>(u_addr);
-// Return whether the byte at *addr is readable, without faulting.
-// Save and restores errno. Returns true on systems where
-// unimplemented.
-// This is a namespace-scoped variable for correct zero-initialization.
-static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid.
+ // rt_sigprocmask below will succeed for this input.
+ if (addr == nullptr) return false;
-bool AddressIsReadable(const void *addr) {
y_absl::base_internal::ErrnoSaver errno_saver;
- // We test whether a byte is readable by using write(). Normally, this would
- // be done via a cached file descriptor to /dev/null, but linux fails to
- // check whether the byte is readable when the destination is /dev/null, so
- // we use a cached pipe. We store the pid of the process that created the
- // pipe to handle the case where a process forks, and the child closes all
- // the file descriptors and then calls this routine. This is not perfect:
- // the child could use the routine, then close all file descriptors and then
- // use this routine again. But the likely use of this routine is when
- // crashing, to test the validity of pages when dumping the stack. Beware
- // that we may leak file descriptors, but we're unlikely to leak many.
- int bytes_written;
- int current_pid = getpid() & 0xffff; // we use only the low order 16 bits
- do { // until we do not get EBADF trying to use file descriptors
- int pid;
- int read_fd;
- int write_fd;
- uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
- Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
- while (current_pid != pid) {
- int p[2];
- // new pipe
- if (pipe(p) != 0) {
- Y_ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
- }
- fcntl(p[0], F_SETFD, FD_CLOEXEC);
- fcntl(p[1], F_SETFD, FD_CLOEXEC);
- uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
- if (pid_and_fds.compare_exchange_strong(
- local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
- std::memory_order_relaxed)) {
- local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
- } else { // fds not exposed to other threads; we can close them.
- close(p[0]);
- close(p[1]);
- local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
- }
- Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
- }
- errno = 0;
- // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
- // and other checkers from complaining about accesses to arbitrary
- // memory.
- do {
- bytes_written = syscall(SYS_write, write_fd, addr, 1);
- } while (bytes_written == -1 && errno == EINTR);
- if (bytes_written == 1) { // remove the byte from the pipe
- char c;
- while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
- }
- }
- if (errno == EBADF) { // Descriptors invalid.
- // If pid_and_fds contains the problematic file descriptors we just used,
- // this call will forget them, and the loop will try again.
- pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
- std::memory_order_release,
- std::memory_order_relaxed);
- }
- } while (errno == EBADF);
- return bytes_written == 1;
+
+ // Here we probe with some syscall which
+ // - accepts an 8-byte region of user memory as input
+ // - tests for EFAULT before other validation
+ // - has no problematic side-effects
+ //
+ // rt_sigprocmask(2) works for this. It copies sizeof(kernel_sigset_t)==8
+ // bytes from the address into the kernel memory before any validation.
+ //
+ // The call can never succeed, since the `how` parameter is not one of
+ // SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK.
+ //
+ // This strategy depends on Linux implementation details,
+ // so we rely on the test to alert us if it stops working.
+ //
+ // Some discarded past approaches:
+ // - msync() doesn't reject PROT_NONE regions
+ // - write() on /dev/null doesn't return EFAULT
+ // - write() on a pipe requires creating it and draining the writes
+ // - connect() works but is problematic for sandboxes and needs a valid
+ // file descriptor
+ //
+ // This can never succeed (invalid first argument to sigprocmask).
+ Y_ABSL_RAW_CHECK(syscall(SYS_rt_sigprocmask, ~0, addr, nullptr,
+ /*sizeof(kernel_sigset_t)*/ 8) == -1,
+ "unexpected success");
+ Y_ABSL_RAW_CHECK(errno == EFAULT || errno == EINVAL, "unexpected errno");
+ return errno != EFAULT;
}
} // namespace debugging_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
-#endif
+#endif // __linux__ && !__ANDROID__
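
The rewritten AddressIsReadable() above probes memory with a single rt_sigprocmask(2) call: the kernel copies 8 bytes from the tested address before validating the bogus `how` argument, so the call always fails, with EFAULT when the address is unreadable and EINVAL otherwise. A standalone, Linux-only sketch of the same idea, kept outside of y_absl for illustration:

    #include <cerrno>
    #include <cstdint>
    #include <cstdio>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Probe whether memory at `addr` can be read without faulting, mirroring
    // the strategy above: rt_sigprocmask() copies sizeof(kernel_sigset_t) == 8
    // bytes from user memory before it rejects the invalid `how` argument.
    static bool ProbeReadable(const void* addr) {
      const int saved_errno = errno;
      // Align down to 8 bytes; an aligned 8-byte read never straddles a page.
      const uintptr_t aligned = reinterpret_cast<uintptr_t>(addr) & ~uintptr_t{7};
      if (aligned == 0) {
        errno = saved_errno;
        return false;
      }
      errno = 0;
      syscall(SYS_rt_sigprocmask, ~0, reinterpret_cast<const void*>(aligned),
              nullptr, /*sigsetsize=*/8);
      const bool readable = (errno != EFAULT);
      errno = saved_errno;
      return readable;
    }

    int main() {
      int x = 42;
      std::printf("stack int readable: %d\n", ProbeReadable(&x) ? 1 : 0);
      std::printf("nullptr readable:   %d\n", ProbeReadable(nullptr) ? 1 : 0);
      return 0;
    }
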
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc
index 1df9cca1b6..0a82359e27 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.cc
@@ -351,7 +351,11 @@ void ElfMemImage::SymbolIterator::Update(int increment) {
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
Y_ABSL_RAW_CHECK(symbol && version_symbol, "");
const char *const symbol_name = image->GetDynstr(symbol->st_name);
+#if defined(__NetBSD__)
+ const int version_index = version_symbol->vs_vers & VERSYM_VERSION;
+#else
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+#endif
const ElfW(Verdef) *version_definition = nullptr;
const char *version_name = "";
if (symbol->st_shndx == SHN_UNDEF) {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
index c19c0e9f8e..a7dde97206 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
@@ -31,8 +31,9 @@
#error Y_ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
#endif
-#if defined(__ELF__) && !defined(__native_client__) && !defined(__asmjs__) && \
- !defined(__wasm__)
+#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
+ !defined(__native_client__) && !defined(__asmjs__) && \
+ !defined(__wasm__) && !defined(__HAIKU__)
#define Y_ABSL_HAVE_ELF_MEM_IMAGE 1
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
index edc926cafd..3f1c0b8b79 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
@@ -20,7 +20,13 @@
#include <unistd.h>
#endif
-#ifdef __APPLE__
+#include "y_absl/base/config.h"
+
+#ifdef Y_ABSL_HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#if defined(__linux__) || defined(__APPLE__)
#include <sys/ucontext.h>
#endif
@@ -37,10 +43,115 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
+namespace {
+constexpr int kDefaultDumpStackFramesLimit = 64;
+// The %p field width for printf() functions is two characters per byte,
+// and two extra for the leading "0x".
+constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
+
+Y_ABSL_CONST_INIT SymbolizeUrlEmitter debug_stack_trace_hook = nullptr;
+
+// Async-signal safe mmap allocator.
+void* Allocate(size_t num_bytes) {
+#ifdef Y_ABSL_HAVE_MMAP
+ void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ return p == MAP_FAILED ? nullptr : p;
+#else
+ (void)num_bytes;
+ return nullptr;
+#endif // Y_ABSL_HAVE_MMAP
+}
+
+void Deallocate(void* p, size_t size) {
+#ifdef Y_ABSL_HAVE_MMAP
+ ::munmap(p, size);
+#else
+ (void)p;
+ (void)size;
+#endif // Y_ABSL_HAVE_MMAP
+}
+
+// Print a program counter only.
+void DumpPC(OutputWriter* writer, void* writer_arg, void* const pc,
+ const char* const prefix) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth, pc);
+ writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding stack frame size.
+void DumpPCAndFrameSize(OutputWriter* writer, void* writer_arg, void* const pc,
+ int framesize, const char* const prefix) {
+ char buf[100];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
+ kPrintfPointerFieldWidth, pc);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize);
+ }
+ writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding symbol.
+void DumpPCAndSymbol(OutputWriter* writer, void* writer_arg, void* const pc,
+ const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ // Symbolizes the previous address of pc because pc may be in the
+ // next function. The overrun happens when the function ends with
+ // a call to a function annotated noreturn (e.g. CHECK).
+ // If symbolization of pc-1 fails, also try pc on the off-chance
+ // that we crashed on the first instruction of a function (that
+ // actually happens very often for e.g. __restore_rt).
+ const uintptr_t prev_pc = reinterpret_cast<uintptr_t>(pc) - 1;
+ if (y_absl::Symbolize(reinterpret_cast<const char*>(prev_pc), tmp,
+ sizeof(tmp)) ||
+ y_absl::Symbolize(pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix, kPrintfPointerFieldWidth,
+ pc, symbol);
+ writer(buf, writer_arg);
+}
+
+// Print a program counter, its stack frame size, and its symbol name.
+// Note that there is a separate symbolize_pc argument. Return addresses may be
+// at the end of the function, and this allows the caller to back up from pc if
+// appropriate.
+void DumpPCAndFrameSizeAndSymbol(OutputWriter* writer, void* writer_arg,
+ void* const pc, void* const symbolize_pc,
+ int framesize, const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ if (y_absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, symbol);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize, symbol);
+ }
+ writer(buf, writer_arg);
+}
+
+} // namespace
+
+void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook) {
+ debug_stack_trace_hook = hook;
+}
+
+SymbolizeUrlEmitter GetDebugStackTraceHook() { return debug_stack_trace_hook; }
+
// Returns the program counter from signal context, nullptr if
// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc) {
+void* GetProgramCounter(void* const vuc) {
#ifdef __linux__
if (vuc != nullptr) {
ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
@@ -82,6 +193,8 @@ void* GetProgramCounter(void* vuc) {
return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
#elif defined(__e2k__)
return reinterpret_cast<void*>(context->uc_mcontext.cr0_hi);
+#elif defined(__loongarch__)
+ return reinterpret_cast<void*>(context->uc_mcontext.__pc);
#else
#error "Undefined Architecture."
#endif
@@ -120,59 +233,17 @@ void* GetProgramCounter(void* vuc) {
return nullptr;
}
-// The %p field width for printf() functions is two characters per byte,
-// and two extra for the leading "0x".
-static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
-
-// Print a program counter, its stack frame size, and its symbol name.
-// Note that there is a separate symbolize_pc argument. Return addresses may be
-// at the end of the function, and this allows the caller to back up from pc if
-// appropriate.
-static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
- void* writerfn_arg, void* pc,
- void* symbolize_pc, int framesize,
- const char* const prefix) {
- char tmp[1024];
- const char* symbol = "(unknown)";
- if (y_absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
- symbol = tmp;
- }
- char buf[1024];
- if (framesize <= 0) {
- snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
- kPrintfPointerFieldWidth, pc, symbol);
- } else {
- snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
- kPrintfPointerFieldWidth, pc, framesize, symbol);
- }
- writerfn(buf, writerfn_arg);
-}
-
-// Print a program counter and the corresponding stack frame size.
-static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
- void* writerfn_arg, void* pc, int framesize,
- const char* const prefix) {
- char buf[100];
- if (framesize <= 0) {
- snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
- kPrintfPointerFieldWidth, pc);
- } else {
- snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
- kPrintfPointerFieldWidth, pc, framesize);
- }
- writerfn(buf, writerfn_arg);
-}
-
-void DumpPCAndFrameSizesAndStackTrace(
- void* pc, void* const stack[], int frame_sizes[], int depth,
- int min_dropped_frames, bool symbolize_stacktrace,
- void (*writerfn)(const char*, void*), void* writerfn_arg) {
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+ int frame_sizes[], int depth,
+ int min_dropped_frames,
+ bool symbolize_stacktrace,
+ OutputWriter* writer, void* writer_arg) {
if (pc != nullptr) {
// We don't know the stack frame size for PC, use 0.
if (symbolize_stacktrace) {
- DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
+ DumpPCAndFrameSizeAndSymbol(writer, writer_arg, pc, pc, 0, "PC: ");
} else {
- DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
+ DumpPCAndFrameSize(writer, writer_arg, pc, 0, "PC: ");
}
}
for (int i = 0; i < depth; i++) {
@@ -182,20 +253,61 @@ void DumpPCAndFrameSizesAndStackTrace(
// call to a function annotated noreturn (e.g. CHECK). Note that we don't
// do this for pc above, as the adjustment is only correct for return
// addresses.
- DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
+ DumpPCAndFrameSizeAndSymbol(writer, writer_arg, stack[i],
reinterpret_cast<char*>(stack[i]) - 1,
frame_sizes[i], " ");
} else {
- DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
- " ");
+ DumpPCAndFrameSize(writer, writer_arg, stack[i], frame_sizes[i], " ");
}
}
if (min_dropped_frames > 0) {
char buf[100];
snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n",
min_dropped_frames);
- writerfn(buf, writerfn_arg);
+ writer(buf, writer_arg);
+ }
+}
+
+// Dump current stack trace as directed by writer.
+// Make sure this function is not inlined to avoid skipping too many top frames.
+Y_ABSL_ATTRIBUTE_NOINLINE
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg) {
+ // Print stack trace
+ void* stack_buf[kDefaultDumpStackFramesLimit];
+ void** stack = stack_buf;
+ int num_stack = kDefaultDumpStackFramesLimit;
+ int allocated_bytes = 0;
+
+ if (num_stack >= max_num_frames) {
+ // User requested fewer frames than we already have space for.
+ num_stack = max_num_frames;
+ } else {
+ const size_t needed_bytes = max_num_frames * sizeof(stack[0]);
+ void* p = Allocate(needed_bytes);
+ if (p != nullptr) { // We got the space.
+ num_stack = max_num_frames;
+ stack = reinterpret_cast<void**>(p);
+ allocated_bytes = needed_bytes;
+ }
}
+
+ size_t depth = y_absl::GetStackTrace(stack, num_stack, min_dropped_frames + 1);
+ for (size_t i = 0; i < depth; i++) {
+ if (symbolize_stacktrace) {
+ DumpPCAndSymbol(writer, writer_arg, stack[i], " ");
+ } else {
+ DumpPC(writer, writer_arg, stack[i], " ");
+ }
+ }
+
+ auto hook = GetDebugStackTraceHook();
+ if (hook != nullptr) {
+ (*hook)(stack, depth, writer, writer_arg);
+ }
+
+ if (allocated_bytes != 0) Deallocate(stack, allocated_bytes);
}
} // namespace debugging_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h
index 4ac2c8358a..25d9da3e1e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.h
@@ -23,17 +23,39 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
+// Type of function used for printing in stack trace dumping, etc.
+// We avoid closures to keep things simple.
+typedef void OutputWriter(const char*, void*);
+
+// RegisterDebugStackTraceHook() allows you to register a single routine
+// `hook` that is called each time DumpStackTrace() is called.
+// `hook` may be called from a signal handler.
+typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth,
+ OutputWriter* writer, void* writer_arg);
+
+// Registration of a SymbolizeUrlEmitter for use inside a signal handler. This
+// is inherently unsafe, so the registered hook must be async-signal-safe code.
+void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook);
+SymbolizeUrlEmitter GetDebugStackTraceHook();
+
// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc);
+void* GetProgramCounter(void* const vuc);
-// Uses `writerfn` to dump the program counter, stack trace, and stack
+// Uses `writer` to dump the program counter, stack trace, and stack
// frame sizes.
-void DumpPCAndFrameSizesAndStackTrace(
- void* pc, void* const stack[], int frame_sizes[], int depth,
- int min_dropped_frames, bool symbolize_stacktrace,
- void (*writerfn)(const char*, void*), void* writerfn_arg);
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+ int frame_sizes[], int depth,
+ int min_dropped_frames,
+ bool symbolize_stacktrace,
+ OutputWriter* writer, void* writer_arg);
+
+// Dump current stack trace omitting the topmost `min_dropped_frames` stack
+// frames.
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg);
} // namespace debugging_internal
Y_ABSL_NAMESPACE_END
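
The new DumpStackTrace() declared above takes a plain OutputWriter function pointer plus an opaque argument instead of a closure, so it remains usable from a signal handler. A small sketch of wiring it to stderr; this is an internal debugging_internal API, shown only to illustrate the writer-based signature:

    #include <cstring>
    #include <unistd.h>

    #include "y_absl/debugging/internal/examine_stack.h"

    // OutputWriter-compatible callback: write each formatted line to the file
    // descriptor passed through the opaque argument.
    static void WriteToFd(const char* text, void* fd_arg) {
      const int fd = *static_cast<const int*>(fd_arg);
      // write() is async-signal-safe, matching the intended use of this API.
      (void)write(fd, text, std::strlen(text));
    }

    void DumpMyStack() {
      int fd = STDERR_FILENO;
      y_absl::debugging_internal::DumpStackTrace(/*min_dropped_frames=*/0,
                                                 /*max_num_frames=*/64,
                                                 /*symbolize_stacktrace=*/true,
                                                 &WriteToFd, &fd);
    }
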
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
index e225b4dd18..d33f47e1cd 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -176,12 +176,17 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 200;
- int j = 0;
- for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
frame_pointer =
NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc
index d735d1f933..bccf1174b7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_arm-inl.inc
@@ -112,11 +112,16 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 200;
- int j = 0;
- for (; sp != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h
index d93c3f1dd9..6a13b5f24a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_config.h
@@ -37,7 +37,8 @@
"y_absl/debugging/internal/stacktrace_generic-inl.inc"
#endif // defined(Y_ABSL_HAVE_THREAD_LOCAL)
-#elif defined(__EMSCRIPTEN__)
+// Emscripten stacktraces rely on JS. Do not use them in standalone mode.
+#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
#define Y_ABSL_STACKTRACE_INL_HEADER \
"y_absl/debugging/internal/stacktrace_emscripten-inl.inc"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
index 635c3dd2a6..56dcd40c67 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -231,11 +231,16 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 1000;
- int j = 0;
- for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc
index 4072ecc116..39a1fb2408 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -56,7 +56,7 @@ static const unsigned char *GetKernelRtSigreturnAddress() {
y_absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
// Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10.
auto lookup = [&](int type) {
- return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_4.15", type,
+ return vdso.LookupSymbol("__vdso_rt_sigreturn", "LINUX_4.15", type,
&symbol_info);
};
if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
@@ -171,26 +171,21 @@ Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
Y_ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
const void *ucp, int *min_dropped_frames) {
+ // The `frame_pointer` that is computed here points to the top of the frame.
+ // The two words preceding the address are the return address and the previous
+ // frame pointer.
#if defined(__GNUC__)
void **frame_pointer = reinterpret_cast<void **>(__builtin_frame_address(0));
#else
#error reading stack pointer not yet supported on this platform
#endif
- skip_count++; // Skip the frame for this function.
int n = 0;
-
- // The `frame_pointer` that is computed here points to the top of the frame.
- // The two words preceding the address are the return address and the previous
- // frame pointer. To find a PC value associated with the current frame, we
- // need to go down a level in the call chain. So we remember the return
- // address of the last frame seen. This does not work for the first stack
- // frame, which belongs to `UnwindImp()` but we skip the frame for
- // `UnwindImp()` anyway.
- void *prev_return_address = nullptr;
-
+ void *return_address = nullptr;
while (frame_pointer && n < max_depth) {
- // The y_absl::GetStackFrames routine si called when we are in some
+ return_address = frame_pointer[-1];
+
+ // The y_absl::GetStackFrames routine is called when we are in some
// informational context (the failure signal handler for example). Use the
// non-strict unwinding rules to produce a stack trace that is as complete
// as possible (even if it contains a few bogus entries in some rare cases).
@@ -200,26 +195,33 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
if (skip_count > 0) {
skip_count--;
} else {
- result[n] = prev_return_address;
+ result[n] = return_address;
if (IS_STACK_FRAMES) {
sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
}
n++;
}
- prev_return_address = frame_pointer[-1];
+
frame_pointer = next_frame_pointer;
}
+
if (min_dropped_frames != nullptr) {
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 200;
- int j = 0;
- for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
frame_pointer =
NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
+
return n;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
index 6e598bc359..bd5dc13d99 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
@@ -341,12 +341,17 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 1000;
- int j = 0;
- for (; fp != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp, stack_low,
stack_high);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc
index ff28d04d57..282b406bb7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/vdso_support.cc
@@ -33,7 +33,7 @@
#endif
#include <unistd.h>
-#if defined(__GLIBC__) && \
+#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
(__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
#define Y_ABSL_HAVE_GETAUXVAL
#endif
@@ -50,8 +50,14 @@
#define AT_SYSINFO_EHDR 33 // for crosstoolv10
#endif
+#if defined(__NetBSD__)
+using Elf32_auxv_t = Aux32Info;
+using Elf64_auxv_t = Aux64Info;
+#endif
#if defined(__FreeBSD__)
+#if defined(__ELF_WORD_SIZE) && __ELF_WORD_SIZE == 64
using Elf64_auxv_t = Elf64_Auxinfo;
+#endif
using Elf32_auxv_t = Elf32_Auxinfo;
#endif
@@ -63,7 +69,9 @@ Y_ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
debugging_internal::ElfMemImage::kInvalidBase);
-std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+Y_ABSL_CONST_INIT std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(
+ &InitAndGetCPU);
+
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// before VDSOSupport::Init has been called. Call it now.
@@ -104,8 +112,13 @@ const void *VDSOSupport::Init() {
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
+#if defined(__NetBSD__)
+ vdso_base_.store(reinterpret_cast<void *>(aux.a_v),
+ std::memory_order_relaxed);
+#else
vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
std::memory_order_relaxed);
+#endif
break;
}
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
index 0ac471b986..1cddc78cc8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
@@ -11,29 +11,19 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
+//
// Wrappers around lsan_interface functions.
-// When lsan is not linked in, these functions are not available,
-// therefore Abseil code which depends on these functions is conditioned on the
-// definition of LEAK_SANITIZER.
-#include "y_absl/base/attributes.h"
-#include "y_absl/debugging/leak_check.h"
+//
+// These are always-available run-time functions manipulating the LeakSanitizer,
+// even when the lsan_interface (and LeakSanitizer) is not available. When
+// LeakSanitizer is not linked in, these functions become no-op stubs.
-#ifndef LEAK_SANITIZER
+#include "y_absl/debugging/leak_check.h"
-namespace y_absl {
-Y_ABSL_NAMESPACE_BEGIN
-bool HaveLeakSanitizer() { return false; }
-bool LeakCheckerIsActive() { return false; }
-void DoIgnoreLeak(const void*) { }
-void RegisterLivePointers(const void*, size_t) { }
-void UnRegisterLivePointers(const void*, size_t) { }
-LeakCheckDisabler::LeakCheckDisabler() { }
-LeakCheckDisabler::~LeakCheckDisabler() { }
-Y_ABSL_NAMESPACE_END
-} // namespace y_absl
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
-#else
+#if defined(Y_ABSL_HAVE_LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
@@ -66,4 +56,18 @@ LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
Y_ABSL_NAMESPACE_END
} // namespace y_absl
-#endif // LEAK_SANITIZER
+#else // defined(Y_ABSL_HAVE_LEAK_SANITIZER)
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+bool HaveLeakSanitizer() { return false; }
+bool LeakCheckerIsActive() { return false; }
+void DoIgnoreLeak(const void*) { }
+void RegisterLivePointers(const void*, size_t) { }
+void UnRegisterLivePointers(const void*, size_t) { }
+LeakCheckDisabler::LeakCheckDisabler() { }
+LeakCheckDisabler::~LeakCheckDisabler() { }
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // defined(Y_ABSL_HAVE_LEAK_SANITIZER)
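
With leak_check.cc reorganized as above, the wrappers are always linked and turn into no-op stubs when Y_ABSL_HAVE_LEAK_SANITIZER is not defined, so callers no longer need their own #ifdef LEAK_SANITIZER guards. A minimal sketch of guarding an intentional allocation, assuming this fork's y_absl/debugging/leak_check.h:

    #include <cstdio>

    #include "y_absl/debugging/leak_check.h"

    int* MakeInternedValue() {
      // This singleton-style allocation is intentionally never freed; while the
      // disabler is in scope, a linked-in leak checker will not report it.
      y_absl::LeakCheckDisabler disabler;
      return new int(42);
    }

    int main() {
      std::printf("leak checker active: %d\n",
                  y_absl::LeakCheckerIsActive() ? 1 : 0);
      std::printf("value = %d\n", *MakeInternedValue());
      return 0;
    }
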
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h
index 154fe4805d..17256e0c03 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.h
@@ -24,7 +24,24 @@
// Note: this leak checking API is not yet supported in MSVC.
// Leak checking is enabled by default in all ASan builds.
//
-// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer
+// is enabled. To use the mode, simply pass `-fsanitize=address` to both the
+// compiler and linker. An example Bazel command could be
+//
+// $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ...
+//
+// GCC and Clang also support a standalone LeakSanitizer mode (a mode which does
+// not also use AddressSanitizer). To use the mode, simply pass
+// `-fsanitize=leak` to both the compiler and linker. Since GCC does not
+// currently provide a way of detecting this mode at compile-time, GCC users
+// must also pass -DLEAK_SANITIZER to the compiler. An example Bazel command
+// could be
+//
+// $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak
+// --linkopt=-fsanitize=leak ...
//
// -----------------------------------------------------------------------------
#ifndef Y_ABSL_DEBUGGING_LEAK_CHECK_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc
index 43363339cd..57dff31090 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize.cc
@@ -23,6 +23,11 @@
#endif
#endif
+// Emscripten symbolization relies on JS. Do not use it in standalone mode.
+#if defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
+#define Y_ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM
+#endif
+
#if defined(Y_ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE)
#include "y_absl/debugging/symbolize_elf.inc"
#elif defined(Y_ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32)
@@ -31,7 +36,7 @@
#include "y_absl/debugging/symbolize_win32.inc"
#elif defined(__APPLE__)
#include "y_absl/debugging/symbolize_darwin.inc"
-#elif defined(__EMSCRIPTEN__)
+#elif defined(Y_ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM)
#include "y_absl/debugging/symbolize_emscripten.inc"
#else
#include "y_absl/debugging/symbolize_unimplemented.inc"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
index 343be4f91b..0640bfc76f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
@@ -323,6 +323,7 @@ class Symbolizer {
const ptrdiff_t relocation,
char *out, int out_size,
char *tmp_buf, int tmp_buf_size);
+ const char *GetUncachedSymbol(const void *pc);
enum {
SYMBOL_BUF_SIZE = 3072,
@@ -1145,6 +1146,14 @@ bool Symbolizer::RegisterObjFile(const char *filename,
reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
}
return true;
+ } else if (old->end_addr == start_addr &&
+ reinterpret_cast<uintptr_t>(old->start_addr) - old->offset ==
+ reinterpret_cast<uintptr_t>(start_addr) - offset &&
+ strcmp(old->filename, filename) == 0) {
+ // Two contiguous map entries that span a contiguous region of the file,
+ // perhaps because some part of the file was mlock()ed. Combine them.
+ old->end_addr = end_addr;
+ return true;
}
}
ObjFile *obj = impl->addr_map_.Add();
@@ -1333,13 +1342,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
// they are called here as well.
// To keep stack consumption low, we would like this function to not
// get inlined.
-const char *Symbolizer::GetSymbol(const void *const pc) {
- const char *entry = FindSymbolInCache(pc);
- if (entry != nullptr) {
- return entry;
- }
- symbol_buf_[0] = '\0';
-
+const char *Symbolizer::GetUncachedSymbol(const void *pc) {
ObjFile *const obj = FindObjFile(pc, 1);
ptrdiff_t relocation = 0;
int fd = -1;
@@ -1427,6 +1430,42 @@ const char *Symbolizer::GetSymbol(const void *const pc) {
return InsertSymbolInCache(pc, symbol_buf_);
}
+const char *Symbolizer::GetSymbol(const void *pc) {
+ const char *entry = FindSymbolInCache(pc);
+ if (entry != nullptr) {
+ return entry;
+ }
+ symbol_buf_[0] = '\0';
+
+#ifdef __hppa__
+ {
+ // In some contexts (e.g., return addresses), PA-RISC uses the lowest two
+ // bits of the address to indicate the privilege level. Clear those bits
+ // before trying to symbolize.
+ const auto pc_bits = reinterpret_cast<uintptr_t>(pc);
+ const auto address = pc_bits & ~0x3;
+ entry = GetUncachedSymbol(reinterpret_cast<const void *>(address));
+ if (entry != nullptr) {
+ return entry;
+ }
+
+ // In some contexts, PA-RISC also uses bit 1 of the address to indicate that
+ // this is a cross-DSO function pointer. Such function pointers actually
+ // point to a procedure label, a struct whose first 32-bit (pointer) element
+ // actually points to the function text. With no symbol found for this
+ // address so far, try interpreting it as a cross-DSO function pointer and
+ // see how that goes.
+ if (pc_bits & 0x2) {
+ return GetUncachedSymbol(*reinterpret_cast<const void *const *>(address));
+ }
+
+ return nullptr;
+ }
+#else
+ return GetUncachedSymbol(pc);
+#endif
+}
+
bool RemoveAllSymbolDecorators(void) {
if (!g_decorators_mu.TryLock()) {
// Someone else is using decorators. Get out.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/config.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/config.h
index c6d4488c51..08de490eb1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/config.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/config.h
@@ -45,14 +45,6 @@
#define Y_ABSL_FLAGS_STRIP_HELP Y_ABSL_FLAGS_STRIP_NAMES
#endif
-// Y_ABSL_FLAGS_INTERNAL_HAS_RTTI macro is used for selecting if we can use RTTI
-// for flag type identification.
-#ifdef Y_ABSL_FLAGS_INTERNAL_HAS_RTTI
-#error Y_ABSL_FLAGS_INTERNAL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
-#define Y_ABSL_FLAGS_INTERNAL_HAS_RTTI 1
-#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
-
// These macros represent the "source of truth" for the list of supported
// built-in types.
#define Y_ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/declare.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/declare.h
index f7455d0873..0b36402496 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/declare.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/declare.h
@@ -60,6 +60,14 @@ Y_ABSL_NAMESPACE_END
// The Y_ABSL_DECLARE_FLAG(type, name) macro expands to:
//
// extern y_absl::Flag<type> FLAGS_name;
-#define Y_ABSL_DECLARE_FLAG(type, name) extern ::y_absl::Flag<type> FLAGS_##name
+#define Y_ABSL_DECLARE_FLAG(type, name) Y_ABSL_DECLARE_FLAG_INTERNAL(type, name)
+
+// Internal implementation of Y_ABSL_DECLARE_FLAG to allow macro expansion of its
+// arguments. Clients must use Y_ABSL_DECLARE_FLAG instead.
+#define Y_ABSL_DECLARE_FLAG_INTERNAL(type, name) \
+ extern y_absl::Flag<type> FLAGS_##name; \
+ namespace y_absl /* block flags in namespaces */ {} \
+ /* second redeclaration is to allow applying attributes */ \
+ extern y_absl::Flag<type> FLAGS_##name
#endif // Y_ABSL_FLAGS_DECLARE_H_
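
The Y_ABSL_DECLARE_FLAG_INTERNAL indirection above exists so that the arguments of Y_ABSL_DECLARE_FLAG are macro-expanded before FLAGS_##name is pasted together. A tiny sketch of the pattern this enables; MY_FEATURE_FLAG is a hypothetical project macro:

    #include "y_absl/flags/declare.h"

    // Hypothetical macro naming the flag. Because Y_ABSL_DECLARE_FLAG now
    // forwards through an internal helper, MY_FEATURE_FLAG expands to
    // enable_fast_path before the FLAGS_ prefix is pasted on, declaring
    // FLAGS_enable_fast_path rather than FLAGS_MY_FEATURE_FLAG.
    #define MY_FEATURE_FLAG enable_fast_path

    Y_ABSL_DECLARE_FLAG(bool, MY_FEATURE_FLAG);
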
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/flag.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/flag.h
index edfbff9c30..65448650d3 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/flag.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/flag.h
@@ -67,6 +67,10 @@ Y_ABSL_NAMESPACE_BEGIN
// Y_ABSL_FLAG(int, count, 0, "Count of items to process");
//
// No public methods of `y_absl::Flag<T>` are part of the Abseil Flags API.
+//
+// For type support of Abseil Flags, see the marshalling.h header file, which
+// discusses supported standard types, optional flags, and additional Abseil
+// type support.
#if !defined(_MSC_VER) || defined(__clang__)
template <typename T>
using Flag = flags_internal::Flag<T>;
@@ -265,6 +269,7 @@ Y_ABSL_NAMESPACE_END
// global name for FLAGS_no<flag_name> symbol, thus preventing the possibility
// of defining two flags with names foo and nofoo.
#define Y_ABSL_FLAG_IMPL(Type, name, default_value, help) \
+ extern ::y_absl::Flag<Type> FLAGS_##name; \
namespace y_absl /* block flags in namespaces */ {} \
Y_ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \
Y_ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc
index 55950b52d1..d3047ee045 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc
@@ -30,6 +30,7 @@
#include "y_absl/base/call_once.h"
#include "y_absl/base/casts.h"
#include "y_absl/base/config.h"
+#include "y_absl/base/dynamic_annotations.h"
#include "y_absl/base/optimization.h"
#include "y_absl/flags/config.h"
#include "y_absl/flags/internal/commandlineflag.h"
@@ -160,6 +161,8 @@ void FlagImpl::Init() {
std::memcpy(buf.data() + Sizeof(op_), &initialized,
sizeof(initialized));
}
+ // Type can contain valid uninitialized bits, e.g. padding.
+ Y_ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf.data(), buf.size());
OneWordValue().store(y_absl::bit_cast<int64_t>(buf),
std::memory_order_release);
break;
@@ -205,7 +208,7 @@ void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id,
if (lhs_runtime_type_id == rhs_runtime_type_id) return;
-#if defined(Y_ABSL_FLAGS_INTERNAL_HAS_RTTI)
+#ifdef Y_ABSL_INTERNAL_HAS_RTTI
if (*lhs_runtime_type_id == *rhs_runtime_type_id) return;
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h
index c5d52f9993..bce5e32656 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h
@@ -163,7 +163,7 @@ inline ptrdiff_t ValueOffset(FlagOpFn op) {
// Returns an address of RTTI's typeid(T).
template <typename T>
inline const std::type_info* GenRuntimeTypeId() {
-#if defined(Y_ABSL_FLAGS_INTERNAL_HAS_RTTI)
+#ifdef Y_ABSL_INTERNAL_HAS_RTTI
return &typeid(T);
#else
return nullptr;
@@ -303,7 +303,9 @@ constexpr FlagDefaultArg DefaultArg(char) {
///////////////////////////////////////////////////////////////////////////////
// Flag current value auxiliary structs.
-constexpr int64_t UninitializedFlagValue() { return 0xababababababababll; }
+constexpr int64_t UninitializedFlagValue() {
+ return static_cast<int64_t>(0xababababababababll);
+}
template <typename T>
using FlagUseValueAndInitBitStorage = std::integral_constant<
@@ -755,8 +757,8 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
case FlagOp::kValueOffset: {
// Round sizeof(FlagImp) to a multiple of alignof(FlagValue<T>) to get the
// offset of the data.
- ptrdiff_t round_to = alignof(FlagValue<T>);
- ptrdiff_t offset =
+ size_t round_to = alignof(FlagValue<T>);
+ size_t offset =
(sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
return reinterpret_cast<void*>(offset);
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h
index 35f6a5b7c6..be84b1ea72 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h
@@ -33,6 +33,7 @@
// * `double`
// * `TString`
// * `std::vector<TString>`
+// * `std::optional<T>`
// * `y_absl::LogSeverity` (provided natively for layering reasons)
//
// Note that support for integral types is implemented using overloads for
@@ -65,6 +66,42 @@
// below.)
//
// -----------------------------------------------------------------------------
+// Optional Flags
+// -----------------------------------------------------------------------------
+//
+// The Abseil flags library supports flags of type `std::optional<T>` where
+// `T` is a type of one of the supported flags. We refer to this flag type as
+// an "optional flag." An optional flag is either "valueless", holding no value
+// of type `T` (indicating that the flag has not been set) or a value of type
+// `T`. The valueless state in C++ code is represented by a value of
+// `std::nullopt` for the optional flag.
+//
+// Using `std::nullopt` as an optional flag's default value allows you to check
+// whether such a flag was ever specified on the command line:
+//
+// if (y_absl::GetFlag(FLAGS_foo).has_value()) {
+// // flag was set on command line
+// } else {
+// // flag was not passed on command line
+// }
+//
+// Using an optional flag in this manner avoids common workarounds for
+// indicating such an unset flag (such as using sentinel values to indicate this
+// state).
+//
+// An optional flag also allows a developer to pass a flag in an "unset"
+// valueless state on the command line, allowing the flag to later be set in
+// binary logic. An optional flag's valueless state is indicated by the special
+// notation of passing the value as an empty string through the syntax `--flag=`
+// or `--flag ""`.
+//
+// $ binary_with_optional --flag_in_unset_state=
+// $ binary_with_optional --flag_in_unset_state ""
+//
+// Note: as a result of the above syntax requirements, an optional flag cannot
+// be set to a `T` of any value which unparses to the empty string.
+//
+// -----------------------------------------------------------------------------
// Adding Type Support for Abseil Flags
// -----------------------------------------------------------------------------
//
@@ -162,14 +199,27 @@
#ifndef Y_ABSL_FLAGS_MARSHALLING_H_
#define Y_ABSL_FLAGS_MARSHALLING_H_
+#include "y_absl/base/config.h"
+
+#if defined(Y_ABSL_HAVE_STD_OPTIONAL) && !defined(Y_ABSL_USES_STD_OPTIONAL)
+#include <optional>
+#endif
#include <util/generic/string.h>
#include <vector>
-#include "y_absl/base/config.h"
#include "y_absl/strings/string_view.h"
+#include "y_absl/types/optional.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
+
+// Forward declaration to be used inside composable flag parse/unparse
+// implementations
+template <typename T>
+inline bool ParseFlag(y_absl::string_view input, T* dst, TString* error);
+template <typename T>
+inline TString UnparseFlag(const T& v);
+
namespace flags_internal {
// Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types.
@@ -189,6 +239,36 @@ bool AbslParseFlag(y_absl::string_view, TString*, TString*);
bool AbslParseFlag(y_absl::string_view, std::vector<TString>*, TString*);
template <typename T>
+bool AbslParseFlag(y_absl::string_view text, y_absl::optional<T>* f,
+ TString* err) {
+ if (text.empty()) {
+ *f = y_absl::nullopt;
+ return true;
+ }
+ T value;
+ if (!y_absl::ParseFlag(text, &value, err)) return false;
+
+ *f = std::move(value);
+ return true;
+}
+
+#if defined(Y_ABSL_HAVE_STD_OPTIONAL) && !defined(Y_ABSL_USES_STD_OPTIONAL)
+template <typename T>
+bool AbslParseFlag(y_absl::string_view text, std::optional<T>* f,
+ TString* err) {
+ if (text.empty()) {
+ *f = std::nullopt;
+ return true;
+ }
+ T value;
+ if (!y_absl::ParseFlag(text, &value, err)) return false;
+
+ *f = std::move(value);
+ return true;
+}
+#endif
+
+template <typename T>
bool InvokeParseFlag(y_absl::string_view input, T* dst, TString* err) {
// Comment on next line provides a good compiler error message if T
// does not have AbslParseFlag(y_absl::string_view, T*, TString*).
@@ -202,6 +282,18 @@ TString AbslUnparseFlag(y_absl::string_view v);
TString AbslUnparseFlag(const std::vector<TString>&);
template <typename T>
+TString AbslUnparseFlag(const y_absl::optional<T>& f) {
+ return f.has_value() ? y_absl::UnparseFlag(*f) : "";
+}
+
+#if defined(Y_ABSL_HAVE_STD_OPTIONAL) && !defined(Y_ABSL_USES_STD_OPTIONAL)
+template <typename T>
+TString AbslUnparseFlag(const std::optional<T>& f) {
+ return f.has_value() ? y_absl::UnparseFlag(*f) : "";
+}
+#endif
+
+template <typename T>
TString Unparse(const T& v) {
// Comment on next line provides a good compiler error message if T does not
// have UnparseFlag.
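
The y_absl::optional<T> and std::optional<T> overloads added above make an empty string parse back to the valueless state, which is what gives "optional flags" their --flag= syntax. A minimal sketch of defining and reading one such flag, assuming the Y_ABSL_FLAG and y_absl::GetFlag machinery from this fork:

    #include "y_absl/flags/flag.h"
    #include "y_absl/types/optional.h"

    // A default of y_absl::nullopt means "not passed on the command line";
    // passing --listen_port= (an empty value) also leaves the flag valueless.
    Y_ABSL_FLAG(y_absl::optional<int>, listen_port, y_absl::nullopt,
                "Port to listen on; when unset an ephemeral port is chosen");

    int PickPort() {
      const y_absl::optional<int> port = y_absl::GetFlag(FLAGS_listen_port);
      if (port.has_value()) return *port;
      return 0;  // 0 asks the OS for an ephemeral port.
    }
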
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
index bf3b367b93..e828c79aa7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
@@ -30,6 +30,10 @@
#ifndef Y_ABSL_FUNCTIONAL_BIND_FRONT_H_
#define Y_ABSL_FUNCTIONAL_BIND_FRONT_H_
+#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+#include <functional> // For std::bind_front.
+#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+
#include "y_absl/functional/internal/front_binder.h"
#include "y_absl/utility/utility.h"
@@ -46,7 +50,8 @@ Y_ABSL_NAMESPACE_BEGIN
// specified. More importantly, it provides more reliable correctness guarantees
// than `std::bind()`; while `std::bind()` will silently ignore passing more
// parameters than expected, for example, `y_absl::bind_front()` will report such
-// mis-uses as errors.
+// mis-uses as errors. In C++20, `y_absl::bind_front` is replaced by
+// `std::bind_front`.
//
// y_absl::bind_front(a...) can be seen as storing the results of
// std::make_tuple(a...).
@@ -170,6 +175,9 @@ Y_ABSL_NAMESPACE_BEGIN
// // Doesn't copy "hi".
// y_absl::bind_front(Print, y_absl::string_view(hi))("Chuk");
//
+#if 0
+using std::bind_front;
+#else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
template <class F, class... BoundArgs>
constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
F&& func, BoundArgs&&... args) {
@@ -177,6 +185,7 @@ constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
y_absl::in_place, y_absl::forward<F>(func),
y_absl::forward<BoundArgs>(args)...);
}
+#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
index 09fec40c7f..78814e8960 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
@@ -69,7 +69,8 @@ class FunctionRef;
// An `y_absl::FunctionRef` is a lightweight wrapper to any invokable object with
// a compatible signature. Generally, an `y_absl::FunctionRef` should only be used
// as an argument type and should be preferred as an argument over a const
-// reference to a `std::function`.
+// reference to a `std::function`. `y_absl::FunctionRef` itself does not allocate,
+// although the wrapped invokable may.
//
// Example:
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
index f876d11dbd..00a359d224 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
@@ -26,9 +26,9 @@
// support Abseil hashing without requiring you to define a hashing
// algorithm.
// * `HashState`, a type-erased class which implements the manipulation of the
-// hash state (H) itself, contains member functions `combine()` and
-// `combine_contiguous()`, which you can use to contribute to an existing
-// hash state when hashing your types.
+// hash state (H) itself; contains member functions `combine()`,
+// `combine_contiguous()`, and `combine_unordered()`; and which you can use
+// to contribute to an existing hash state when hashing your types.
//
// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
// provides most of its utility by abstracting away the hash algorithm (and its
@@ -40,6 +40,11 @@
// each process. E.g., `y_absl::Hash<int>{}(9)` in one process and
// `y_absl::Hash<int>{}(9)` in another process are likely to differ.
//
+// `y_absl::Hash` may also produce different values from different dynamically
+// loaded libraries. For this reason, `y_absl::Hash` values must never cross
+// boundaries in dynamically loaded libraries (including when used in types
+// like hash containers).
+//
// `y_absl::Hash` is intended to strongly mix input bits with a target of passing
// an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect).
//
@@ -74,7 +79,9 @@
#define Y_ABSL_HASH_HASH_H_
#include <tuple>
+#include <utility>
+#include "y_absl/functional/function_ref.h"
#include "y_absl/hash/internal/hash.h"
namespace y_absl {
@@ -107,14 +114,27 @@ Y_ABSL_NAMESPACE_BEGIN
// * std::string_view (as well as any instance of std::basic_string that
// uses char and std::char_traits)
// * All the standard sequence containers (provided the elements are hashable)
-// * All the standard ordered associative containers (provided the elements are
+// * All the standard associative containers (provided the elements are
// hashable)
// * y_absl types such as the following:
// * y_absl::string_view
-// * y_absl::InlinedVector
-// * y_absl::FixedArray
// * y_absl::uint128
// * y_absl::Time, y_absl::Duration, and y_absl::TimeZone
+// * y_absl containers (provided the elements are hashable) such as the
+// following:
+// * y_absl::flat_hash_set, y_absl::node_hash_set, y_absl::btree_set
+// * y_absl::flat_hash_map, y_absl::node_hash_map, y_absl::btree_map
+// * y_absl::btree_multiset, y_absl::btree_multimap
+// * y_absl::InlinedVector
+// * y_absl::FixedArray
+//
+// When y_absl::Hash is used to hash an unordered container with a custom hash
+// functor, the elements are hashed using default y_absl::Hash semantics, not
+// the custom hash functor. This is consistent with the behavior of
+// operator==() on unordered containers, which compares elements pairwise with
+// operator==() rather than the custom equality functor. It is usually a
+// mistake to use either operator==() or y_absl::Hash on unordered collections
+// that use functors incompatible with operator==() equality.
//
// Note: the list above is not meant to be exhaustive. Additional type support
// may be added, in which case the above list will be updated.
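
A short, hedged illustration of the expanded support list above (the values are arbitrary; the snippet only shows that ordered, unordered, and y_absl containers are all accepted by `y_absl::Hash`):

    #include <unordered_set>
    #include <vector>

    #include "y_absl/container/flat_hash_set.h"
    #include "y_absl/hash/hash.h"

    void HashContainerExamples() {
      std::vector<int> v = {1, 2, 3};
      std::unordered_set<int> u = {1, 2, 3};
      y_absl::flat_hash_set<int> f = {1, 2, 3};

      // Each container type is hashable provided its elements are hashable.
      size_t hv = y_absl::Hash<std::vector<int>>{}(v);
      // Unordered containers hash order-independently, so two sets holding the
      // same elements produce the same value regardless of bucket layout.
      size_t hu = y_absl::Hash<std::unordered_set<int>>{}(u);
      size_t hf = y_absl::Hash<y_absl::flat_hash_set<int>>{}(f);
      (void)hv; (void)hu; (void)hf;
    }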
@@ -153,7 +173,8 @@ Y_ABSL_NAMESPACE_BEGIN
// that are otherwise difficult to extend using `AbslHashValue()`. (See the
// `HashState` class below.)
//
-// The "hash state" concept contains two member functions for mixing hash state:
+// The "hash state" concept contains three member functions for mixing hash
+// state:
//
// * `H::combine(state, values...)`
//
@@ -187,6 +208,15 @@ Y_ABSL_NAMESPACE_BEGIN
// (it may perform internal optimizations). If you need this guarantee, use a
// loop instead.
//
+// * `H::combine_unordered(state, begin, end)`
+//
+// Combines a set of elements denoted by an iterator pair into a hash
+// state, returning the updated state. Note that the existing hash
+// state is move-only and must be passed by value.
+//
+// Unlike the other two methods, the hashing is order-independent.
+// This can be used to hash unordered collections.
+//
// -----------------------------------------------------------------------------
// Adding Type Support to `y_absl::Hash`
// -----------------------------------------------------------------------------
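
A hedged sketch of adding type support for a user-defined unordered collection with the `combine_unordered()` hook documented above; `MyBag` is hypothetical and mirrors the pattern this change uses for the standard unordered containers:

    #include <unordered_set>
    #include <utility>

    #include "y_absl/hash/hash.h"

    // Hypothetical wrapper around an unordered collection of ints.
    struct MyBag {
      std::unordered_set<int> items;

      template <typename H>
      friend H AbslHashValue(H h, const MyBag& bag) {
        // Mix the elements order-independently, then mix in the size.
        return H::combine(
            H::combine_unordered(std::move(h), bag.items.begin(),
                                 bag.items.end()),
            bag.items.size());
      }
    };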
@@ -243,8 +273,9 @@ size_t HashOf(const Types&... values) {
// classes, virtual functions, etc.). The type erasure adds overhead so it
// should be avoided unless necessary.
//
-// Note: This wrapper will only erase calls to:
+// Note: This wrapper will only erase calls to
// combine_contiguous(H, const unsigned char*, size_t)
+// RunCombineUnordered(H, CombinerF)
//
// All other calls will be handled internally and will not invoke overloads
// provided by the wrapped class.
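
One common way the type-erased `HashState` described above is used, sketched with a hypothetical virtual interface (`Shape` is not part of this change):

    #include "y_absl/hash/hash.h"

    // Hypothetical interface whose implementations want to contribute to a
    // hash without exposing the templated hash state in a virtual signature.
    class Shape {
     public:
      virtual ~Shape() = default;

      // Implementations mix their salient fields into the erased state.
      virtual void HashValue(y_absl::HashState state) const = 0;

      template <typename H>
      friend H AbslHashValue(H h, const Shape& shape) {
        // Only combine_contiguous() and RunCombineUnordered() cross the
        // type-erasure boundary; everything else stays in the wrapper.
        shape.HashValue(y_absl::HashState::Create(&h));
        return h;
      }
    };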
@@ -318,6 +349,8 @@ class HashState : public hash_internal::HashStateBase<HashState> {
private:
HashState() = default;
+ friend class HashState::HashStateBase;
+
template <typename T>
static void CombineContiguousImpl(void* p, const unsigned char* first,
size_t size) {
@@ -329,16 +362,57 @@ class HashState : public hash_internal::HashStateBase<HashState> {
void Init(T* state) {
state_ = state;
combine_contiguous_ = &CombineContiguousImpl<T>;
+ run_combine_unordered_ = &RunCombineUnorderedImpl<T>;
+ }
+
+ template <typename HS>
+ struct CombineUnorderedInvoker {
+ template <typename T, typename ConsumerT>
+ void operator()(T inner_state, ConsumerT inner_cb) {
+ f(HashState::Create(&inner_state),
+ [&](HashState& inner_erased) { inner_cb(inner_erased.Real<T>()); });
+ }
+
+ y_absl::FunctionRef<void(HS, y_absl::FunctionRef<void(HS&)>)> f;
+ };
+
+ template <typename T>
+ static HashState RunCombineUnorderedImpl(
+ HashState state,
+ y_absl::FunctionRef<void(HashState, y_absl::FunctionRef<void(HashState&)>)>
+ f) {
+ // Note that this implementation assumes that inner_state and outer_state
+ // are the same type. This isn't true in the SpyHash case, but SpyHash
+ // types are move-convertible to each other, so this still works.
+ T& real_state = state.Real<T>();
+ real_state = T::RunCombineUnordered(
+ std::move(real_state), CombineUnorderedInvoker<HashState>{f});
+ return state;
+ }
+
+ template <typename CombinerT>
+ static HashState RunCombineUnordered(HashState state, CombinerT combiner) {
+ auto* run = state.run_combine_unordered_;
+ return run(std::move(state), std::ref(combiner));
}
// Do not erase an already erased state.
void Init(HashState* state) {
state_ = state->state_;
combine_contiguous_ = state->combine_contiguous_;
+ run_combine_unordered_ = state->run_combine_unordered_;
+ }
+
+ template <typename T>
+ T& Real() {
+ return *static_cast<T*>(state_);
}
void* state_;
void (*combine_contiguous_)(void*, const unsigned char*, size_t);
+ HashState (*run_combine_unordered_)(
+ HashState state,
+ y_absl::FunctionRef<void(HashState, y_absl::FunctionRef<void(HashState&)>)>);
};
Y_ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
index a14b74ed74..f295ea6018 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
@@ -23,6 +23,7 @@
#include <array>
#include <bitset>
#include <cmath>
+#include <cstddef>
#include <cstring>
#include <deque>
#include <forward_list>
@@ -36,6 +37,8 @@
#include <util/generic/string.h>
#include <tuple>
#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -54,6 +57,9 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
+
+class HashState;
+
namespace hash_internal {
// Internal detail: Large buffers are hashed in smaller chunks. This function
@@ -115,24 +121,66 @@ class PiecewiseCombiner {
size_t position_;
};
+// is_hashable()
+//
+// Trait class which returns true if T is hashable by the y_absl::Hash framework.
+// Used for the AbslHashValue implementations for composite types below.
+template <typename T>
+struct is_hashable;
+
// HashStateBase
//
-// A hash state object represents an intermediate state in the computation
-// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style
-// base class for hash state implementations. Developers adding type support
-// for `y_absl::Hash` should not rely on any parts of the state object other than
-// the following member functions:
+// An internal implementation detail that contains common implementation details
+// for all of the "hash state objects" generated by Abseil. This is not
+// a public API; users should not create classes that inherit from this.
+//
+// A hash state object is the template argument `H` passed to `AbslHashValue`.
+// It represents an intermediate state in the computation of an unspecified hash
+// algorithm. `HashStateBase` provides a CRTP style base class for hash state
+// implementations. Developers adding type support for `y_absl::Hash` should not
+// rely on any parts of the state object other than the following member
+// functions:
//
// * HashStateBase::combine()
// * HashStateBase::combine_contiguous()
+// * HashStateBase::combine_unordered()
//
-// A derived hash state class of type `H` must provide a static member function
+// A derived hash state class of type `H` must provide a public member function
// with a signature similar to the following:
//
// `static H combine_contiguous(H state, const unsigned char*, size_t)`.
//
+// It must also provide a private template method named RunCombineUnordered.
+//
+// A "consumer" is a 1-arg functor returning void. Its argument is a reference
+// to an inner hash state object, and it may be called multiple times. When
+// called, the functor consumes the entropy from the provided state object,
+// and resets that object to its empty state.
+//
+// A "combiner" is a stateless 2-arg functor returning void. Its arguments are
+// an inner hash state object and an ElementStateConsumer functor. A combiner
+// uses the provided inner hash state object to hash each element of the
+// container, passing the inner hash state object to the consumer after hashing
+// each element.
+//
+// Given these definitions, a derived hash state class of type H
+// must provide a private template method with a signature similar to the
+// following:
+//
+// `template <typename CombinerT>`
+// `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
+//
+// This function is responsible for constructing the inner state object and
+// providing a consumer to the combiner. It uses side effects of the consumer
+// and combiner to mix the state of each element in an order-independent manner,
+// and uses this to return an updated value of `outer_state`.
+//
+// This inside-out approach generates efficient object code in the normal case,
+// but allows us to use stack storage to implement the y_absl::HashState type
+// erasure mechanism (avoiding heap allocations while hashing).
+//
// `HashStateBase` will provide a complete implementation for a hash state
-// object in terms of this method.
+// object in terms of these two methods.
//
// Example:
//
@@ -141,6 +189,10 @@ class PiecewiseCombiner {
// static H combine_contiguous(H state, const unsigned char*, size_t);
// using MyHashState::HashStateBase::combine;
// using MyHashState::HashStateBase::combine_contiguous;
+// using MyHashState::HashStateBase::combine_unordered;
+// private:
+// template <typename CombinerT>
+// static H RunCombineUnordered(H state, CombinerT combiner);
// };
template <typename H>
class HashStateBase {
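
A compact, non-authoritative sketch of the combiner/consumer contract spelled out above, using an invented `ToyHashState` purely for illustration (real code outside Abseil should not derive from this internal base, as the comment notes):

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    #include "y_absl/hash/internal/hash.h"

    class ToyHashState
        : public y_absl::hash_internal::HashStateBase<ToyHashState> {
     public:
      static ToyHashState combine_contiguous(ToyHashState state,
                                             const unsigned char* data,
                                             size_t size) {
        for (size_t i = 0; i < size; ++i) {
          state.value_ = state.value_ * 131 + data[i];
        }
        return state;
      }
      using ToyHashState::HashStateBase::combine;
      using ToyHashState::HashStateBase::combine_contiguous;
      using ToyHashState::HashStateBase::combine_unordered;

     private:
      friend class ToyHashState::HashStateBase;

      // The combiner hashes each element into a fresh inner state and hands it
      // to the consumer; the consumer folds that entropy into an
      // order-independent accumulator and resets the inner state.
      template <typename CombinerT>
      static ToyHashState RunCombineUnordered(ToyHashState outer_state,
                                              CombinerT combiner) {
        uint64_t unordered = 0;
        combiner(ToyHashState{}, [&](ToyHashState& inner_state) {
          unordered ^= inner_state.value_;
          inner_state = ToyHashState{};
        });
        return ToyHashState::combine(std::move(outer_state), unordered);
      }

      uint64_t value_ = 0;
    };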
@@ -181,7 +233,30 @@ class HashStateBase {
template <typename T>
static H combine_contiguous(H state, const T* data, size_t size);
+ template <typename I>
+ static H combine_unordered(H state, I begin, I end);
+
using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
+
+ template <typename T>
+ using is_hashable = y_absl::hash_internal::is_hashable<T>;
+
+ private:
+ // Common implementation of the iteration step of a "combiner", as described
+ // above.
+ template <typename I>
+ struct CombineUnorderedCallback {
+ I begin;
+ I end;
+
+ template <typename InnerH, typename ElementStateConsumer>
+ void operator()(InnerH inner_state, ElementStateConsumer cb) {
+ for (; begin != end; ++begin) {
+ inner_state = H::combine(std::move(inner_state), *begin);
+ cb(inner_state);
+ }
+ }
+ };
};
// is_uniquely_represented
@@ -346,17 +421,43 @@ H AbslHashValue(H hash_state, std::nullptr_t) {
return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
}
+// AbslHashValue() for hashing pointers-to-member
+template <typename H, typename T, typename C>
+H AbslHashValue(H hash_state, T C::* ptr) {
+ auto salient_ptm_size = [](std::size_t n) -> std::size_t {
+#if defined(_MSC_VER)
+ // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
+ // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
+ // padding (namely when they have 1 or 3 ints). The value below is a lower
+ // bound on the number of salient, non-padding bytes that we use for
+ // hashing.
+ if (alignof(T C::*) == alignof(int)) {
+ // No padding when all subobjects have the same size as the total
+ // alignment. This happens in 32-bit mode.
+ return n;
+ } else {
+ // Padding for 1 int (size 16) or 3 ints (size 24).
+ // With 2 ints, the size is 16 with no padding, which we pessimize.
+ return n == 24 ? 20 : n == 16 ? 12 : n;
+ }
+#else
+ // On other platforms, we assume that pointers-to-members do not have
+ // padding.
+#ifdef __cpp_lib_has_unique_object_representations
+ static_assert(std::has_unique_object_representations_v<T C::*>);
+#endif // __cpp_lib_has_unique_object_representations
+ return n;
+#endif
+ };
+ return H::combine_contiguous(std::move(hash_state),
+ reinterpret_cast<unsigned char*>(&ptr),
+ salient_ptm_size(sizeof ptr));
+}
+
// -----------------------------------------------------------------------------
// AbslHashValue for Composite Types
// -----------------------------------------------------------------------------
-// is_hashable()
-//
-// Trait class which returns true if T is hashable by the y_absl::Hash framework.
-// Used for the AbslHashValue implementations for composite types below.
-template <typename T>
-struct is_hashable;
-
// AbslHashValue() for hashing pairs
template <typename H, typename T1, typename T2>
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
@@ -502,10 +603,11 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
vector.size());
}
+// AbslHashValue special cases for hashing std::vector<bool>
+
#if defined(Y_ABSL_IS_BIG_ENDIAN) && \
(defined(__GLIBCXX__) || defined(__GLIBCPP__))
-// AbslHashValue for hashing std::vector<bool>
-//
+
// std::hash in libstdc++ does not work correctly with vector<bool> on Big
// Endian platforms therefore we need to implement a custom AbslHashValue for
// it. More details on the bug:
@@ -521,6 +623,22 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
}
return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
}
+#else
+// When not working around the libstdc++ bug above, we still have to contend
+// with the fact that std::hash<vector<bool>> is often poor quality, hashing
+// directly on the internal words and on no other state. On these platforms,
+// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
+//
+// Mixing in the size (as we do in our other vector<> implementations) on top
+// of the library-provided hash implementation avoids this QOI issue.
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+ return H::combine(std::move(hash_state),
+ std::hash<std::vector<T, Allocator>>{}(vector),
+ vector.size());
+}
#endif
// -----------------------------------------------------------------------------
@@ -573,6 +691,55 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
}
// -----------------------------------------------------------------------------
+// AbslHashValue for Unordered Associative Containers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::unordered_set
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+ typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multiset
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+ typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state,
+ const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_map
+template <typename H, typename Key, typename T, typename Hash,
+ typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multimap
+template <typename H, typename Key, typename T, typename Hash,
+ typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// -----------------------------------------------------------------------------
// AbslHashValue for Wrapper Types
// -----------------------------------------------------------------------------
@@ -815,6 +982,31 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// move-only ensures that there is only one non-moved-from object.
MixingHashState() : state_(Seed()) {}
+ friend class MixingHashState::HashStateBase;
+
+ template <typename CombinerT>
+ static MixingHashState RunCombineUnordered(MixingHashState state,
+ CombinerT combiner) {
+ uint64_t unordered_state = 0;
+ combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
+ // Add the hash state of the element to the running total, but mix the
+      // carry bit back into the low bit. This is intended to avoid losing
+ // entropy to overflow, especially when unordered_multisets contain
+ // multiple copies of the same value.
+ auto element_state = inner_state.state_;
+ unordered_state += element_state;
+ if (unordered_state < element_state) {
+ ++unordered_state;
+ }
+ inner_state = MixingHashState{};
+ });
+ return MixingHashState::combine(std::move(state), unordered_state);
+ }
+
+ // Allow the HashState type-erasure implementation to invoke
+  // RunCombineUnordered() directly.
+ friend class y_absl::HashState;
+
// Workaround for MSVC bug.
// We make the type copyable to fix the calling convention, even though we
// never actually copy it. Keep it private to not affect the public API of the
@@ -898,15 +1090,10 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
}
Y_ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
-#if defined(__aarch64__)
- // On AArch64, calculating a 128-bit product is inefficient, because it
- // requires a sequence of two instructions to calculate the upper and lower
- // halves of the result.
- using MultType = uint64_t;
-#else
+ // Though the 128-bit product on AArch64 needs two instructions, it is
+ // still a good balance between speed and hash quality.
using MultType =
y_absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
-#endif
// We do the addition in 64-bit space to make sure the 128-bit
// multiplication is fast. If we were to do it as MultType the compiler has
// to assume that the high word is non-zero and needs to perform 2
@@ -1049,6 +1236,14 @@ H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
return hash_internal::hash_range_or_bytes(std::move(state), data, size);
}
+// HashStateBase::combine_unordered()
+template <typename H>
+template <typename I>
+H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
+ return H::RunCombineUnordered(std::move(state),
+ CombineUnorderedCallback<I>{begin, end});
+}
+
// HashStateBase::PiecewiseCombiner::add_buffer()
template <typename H>
H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
index a4dcd71f7a..75cd272d77 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
@@ -133,7 +133,8 @@ template <class T>
Y_ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
bit_width(T x) noexcept {
- return std::numeric_limits<T>::digits - countl_zero(x);
+ return std::numeric_limits<T>::digits -
+ static_cast<unsigned int>(countl_zero(x));
}
// Returns: If x == 0, 0; otherwise the maximal value y such that
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
index 90b7181c7b..fe9d65cc63 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
@@ -42,11 +42,11 @@ namespace {
// Returns: 2
inline Y_ABSL_ATTRIBUTE_ALWAYS_INLINE int Fls128(uint128 n) {
if (uint64_t hi = Uint128High64(n)) {
- Y_ABSL_INTERNAL_ASSUME(hi != 0);
+ Y_ABSL_ASSUME(hi != 0);
return 127 - countl_zero(hi);
}
const uint64_t low = Uint128Low64(n);
- Y_ABSL_INTERNAL_ASSUME(low != 0);
+ Y_ABSL_ASSUME(low != 0);
return 63 - countl_zero(low);
}
@@ -332,6 +332,7 @@ std::ostream& operator<<(std::ostream& os, int128 v) {
Y_ABSL_NAMESPACE_END
} // namespace y_absl
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
namespace std {
constexpr bool numeric_limits<y_absl::uint128>::is_specialized;
constexpr bool numeric_limits<y_absl::uint128>::is_signed;
@@ -381,3 +382,4 @@ constexpr int numeric_limits<y_absl::int128>::max_exponent10;
constexpr bool numeric_limits<y_absl::int128>::traps;
constexpr bool numeric_limits<y_absl::int128>::tinyness_before;
} // namespace std
+#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
index ef5ff99c78..34c920a032 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
@@ -44,7 +44,7 @@
// builtin type. We need to make sure not to define operator wchar_t()
// alongside operator unsigned short() in these instances.
#define Y_ABSL_INTERNAL_WCHAR_T __wchar_t
-#if defined(_M_X64)
+#if defined(_M_X64) && !defined(_M_ARM64EC)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif // defined(_M_X64)
@@ -980,7 +980,7 @@ inline uint128 operator*(uint128 lhs, uint128 rhs) {
// can be used for uint128 storage.
return static_cast<unsigned __int128>(lhs) *
static_cast<unsigned __int128>(rhs);
-#elif defined(_MSC_VER) && defined(_M_X64)
+#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
uint64_t carry;
uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry);
return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) +
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h
index 9330aae739..f820d567c9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/internal/sample_recorder.h
@@ -46,6 +46,7 @@ struct Sample {
y_absl::Mutex init_mu;
T* next = nullptr;
T* dead Y_ABSL_GUARDED_BY(init_mu) = nullptr;
+ int64_t weight; // How many sampling events were required to sample this one.
};
// Holds samples and their associated stack traces with a soft limit of
@@ -59,7 +60,8 @@ class SampleRecorder {
~SampleRecorder();
// Registers for sampling. Returns an opaque registration info.
- T* Register();
+ template <typename... Targs>
+ T* Register(Targs&&... args);
// Unregisters the sample.
void Unregister(T* sample);
@@ -75,12 +77,14 @@ class SampleRecorder {
// samples that have been dropped.
int64_t Iterate(const std::function<void(const T& stack)>& f);
+ int32_t GetMaxSamples() const;
void SetMaxSamples(int32_t max);
private:
void PushNew(T* sample);
void PushDead(T* sample);
- T* PopDead();
+ template <typename... Targs>
+ T* PopDead(Targs... args);
std::atomic<size_t> dropped_samples_;
std::atomic<size_t> size_estimate_;
@@ -162,7 +166,8 @@ void SampleRecorder<T>::PushDead(T* sample) {
}
template <typename T>
-T* SampleRecorder<T>::PopDead() {
+template <typename... Targs>
+T* SampleRecorder<T>::PopDead(Targs... args) {
y_absl::MutexLock graveyard_lock(&graveyard_.init_mu);
// The list is circular, so eventually it collapses down to
@@ -174,12 +179,13 @@ T* SampleRecorder<T>::PopDead() {
y_absl::MutexLock sample_lock(&sample->init_mu);
graveyard_.dead = sample->dead;
sample->dead = nullptr;
- sample->PrepareForSampling();
+ sample->PrepareForSampling(std::forward<Targs>(args)...);
return sample;
}
template <typename T>
-T* SampleRecorder<T>::Register() {
+template <typename... Targs>
+T* SampleRecorder<T>::Register(Targs&&... args) {
int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
if (size > max_samples_.load(std::memory_order_relaxed)) {
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
@@ -187,10 +193,14 @@ T* SampleRecorder<T>::Register() {
return nullptr;
}
- T* sample = PopDead();
+ T* sample = PopDead(args...);
if (sample == nullptr) {
// Resurrection failed. Hire a new warlock.
sample = new T();
+ {
+ y_absl::MutexLock sample_lock(&sample->init_mu);
+ sample->PrepareForSampling(std::forward<Targs>(args)...);
+ }
PushNew(sample);
}
@@ -223,6 +233,11 @@ void SampleRecorder<T>::SetMaxSamples(int32_t max) {
max_samples_.store(max, std::memory_order_release);
}
+template <typename T>
+int32_t SampleRecorder<T>::GetMaxSamples() const {
+ return max_samples_.load(std::memory_order_acquire);
+}
+
} // namespace profiling_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
index 3195a6eb4f..87259b1fc4 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
@@ -15,7 +15,9 @@
#define Y_ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
#include <util/generic/string.h>
+#include <utility>
+#include "y_absl/base/attributes.h"
#include "y_absl/container/inlined_vector.h"
#include "y_absl/strings/cord.h"
@@ -25,7 +27,14 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
// as part of a class definitions (b/6995610), so we use a forward declaration.
+//
+// TODO(b/176172494): Y_ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+#if Y_ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] Status;
+#else
class Y_ABSL_MUST_USE_RESULT Status;
+#endif
Y_ABSL_NAMESPACE_END
} // namespace y_absl
#endif // !SWIG
@@ -61,6 +70,14 @@ struct StatusRep {
};
y_absl::StatusCode MapToLocalCode(int value);
+
+// Returns a pointer to a newly-allocated string with the given `prefix`,
+// suitable for output as an error message in assertion/`CHECK()` failures.
+//
+// This is an internal implementation detail for Abseil logging.
+TString* MakeCheckFailString(const y_absl::Status* status,
+ const char* prefix);
+
} // namespace status_internal
Y_ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
index 2bfa4be70a..3bcf02dbbd 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
@@ -13,9 +13,12 @@
// limitations under the License.
#include "y_absl/status/status.h"
+#include <errno.h>
+
#include <cassert>
#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/strerror.h"
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/debugging/symbolize.h"
#include "y_absl/status/status_payload_printer.h"
@@ -185,11 +188,16 @@ void Status::ForEachPayload(
}
const TString* Status::EmptyString() {
- static TString* empty_string = new TString();
- return empty_string;
+ static union EmptyString {
+ TString str;
+ ~EmptyString() {}
+ } empty = {{}};
+ return &empty.str;
}
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr const char Status::kMovedFromString[];
+#endif
const TString* Status::MovedFromString() {
static TString* moved_from_string = new TString(kMovedFromString);
@@ -440,5 +448,169 @@ bool IsUnknown(const Status& status) {
return status.code() == y_absl::StatusCode::kUnknown;
}
+StatusCode ErrnoToStatusCode(int error_number) {
+ switch (error_number) {
+ case 0:
+ return StatusCode::kOk;
+ case EINVAL: // Invalid argument
+ case ENAMETOOLONG: // Filename too long
+ case E2BIG: // Argument list too long
+ case EDESTADDRREQ: // Destination address required
+ case EDOM: // Mathematics argument out of domain of function
+ case EFAULT: // Bad address
+ case EILSEQ: // Illegal byte sequence
+ case ENOPROTOOPT: // Protocol not available
+ case ENOSTR: // Not a STREAM
+ case ENOTSOCK: // Not a socket
+ case ENOTTY: // Inappropriate I/O control operation
+ case EPROTOTYPE: // Protocol wrong type for socket
+ case ESPIPE: // Invalid seek
+ return StatusCode::kInvalidArgument;
+ case ETIMEDOUT: // Connection timed out
+ case ETIME: // Timer expired
+ return StatusCode::kDeadlineExceeded;
+ case ENODEV: // No such device
+ case ENOENT: // No such file or directory
+#ifdef ENOMEDIUM
+ case ENOMEDIUM: // No medium found
+#endif
+ case ENXIO: // No such device or address
+ case ESRCH: // No such process
+ return StatusCode::kNotFound;
+ case EEXIST: // File exists
+ case EADDRNOTAVAIL: // Address not available
+ case EALREADY: // Connection already in progress
+#ifdef ENOTUNIQ
+ case ENOTUNIQ: // Name not unique on network
+#endif
+ return StatusCode::kAlreadyExists;
+ case EPERM: // Operation not permitted
+ case EACCES: // Permission denied
+#ifdef ENOKEY
+ case ENOKEY: // Required key not available
+#endif
+ case EROFS: // Read only file system
+ return StatusCode::kPermissionDenied;
+ case ENOTEMPTY: // Directory not empty
+ case EISDIR: // Is a directory
+ case ENOTDIR: // Not a directory
+ case EADDRINUSE: // Address already in use
+ case EBADF: // Invalid file descriptor
+#ifdef EBADFD
+ case EBADFD: // File descriptor in bad state
+#endif
+ case EBUSY: // Device or resource busy
+ case ECHILD: // No child processes
+ case EISCONN: // Socket is connected
+#ifdef EISNAM
+ case EISNAM: // Is a named type file
+#endif
+#ifdef ENOTBLK
+ case ENOTBLK: // Block device required
+#endif
+ case ENOTCONN: // The socket is not connected
+ case EPIPE: // Broken pipe
+#ifdef ESHUTDOWN
+ case ESHUTDOWN: // Cannot send after transport endpoint shutdown
+#endif
+ case ETXTBSY: // Text file busy
+#ifdef EUNATCH
+ case EUNATCH: // Protocol driver not attached
+#endif
+ return StatusCode::kFailedPrecondition;
+ case ENOSPC: // No space left on device
+#ifdef EDQUOT
+ case EDQUOT: // Disk quota exceeded
+#endif
+ case EMFILE: // Too many open files
+ case EMLINK: // Too many links
+ case ENFILE: // Too many open files in system
+ case ENOBUFS: // No buffer space available
+ case ENODATA: // No message is available on the STREAM read queue
+ case ENOMEM: // Not enough space
+ case ENOSR: // No STREAM resources
+#ifdef EUSERS
+ case EUSERS: // Too many users
+#endif
+ return StatusCode::kResourceExhausted;
+#ifdef ECHRNG
+ case ECHRNG: // Channel number out of range
+#endif
+ case EFBIG: // File too large
+ case EOVERFLOW: // Value too large to be stored in data type
+ case ERANGE: // Result too large
+ return StatusCode::kOutOfRange;
+#ifdef ENOPKG
+ case ENOPKG: // Package not installed
+#endif
+ case ENOSYS: // Function not implemented
+ case ENOTSUP: // Operation not supported
+ case EAFNOSUPPORT: // Address family not supported
+#ifdef EPFNOSUPPORT
+ case EPFNOSUPPORT: // Protocol family not supported
+#endif
+ case EPROTONOSUPPORT: // Protocol not supported
+#ifdef ESOCKTNOSUPPORT
+ case ESOCKTNOSUPPORT: // Socket type not supported
+#endif
+ case EXDEV: // Improper link
+ return StatusCode::kUnimplemented;
+ case EAGAIN: // Resource temporarily unavailable
+#ifdef ECOMM
+ case ECOMM: // Communication error on send
+#endif
+ case ECONNREFUSED: // Connection refused
+ case ECONNABORTED: // Connection aborted
+ case ECONNRESET: // Connection reset
+ case EINTR: // Interrupted function call
+#ifdef EHOSTDOWN
+ case EHOSTDOWN: // Host is down
+#endif
+ case EHOSTUNREACH: // Host is unreachable
+ case ENETDOWN: // Network is down
+ case ENETRESET: // Connection aborted by network
+ case ENETUNREACH: // Network unreachable
+ case ENOLCK: // No locks available
+ case ENOLINK: // Link has been severed
+#ifdef ENONET
+ case ENONET: // Machine is not on the network
+#endif
+ return StatusCode::kUnavailable;
+ case EDEADLK: // Resource deadlock avoided
+#ifdef ESTALE
+ case ESTALE: // Stale file handle
+#endif
+ return StatusCode::kAborted;
+ case ECANCELED: // Operation cancelled
+ return StatusCode::kCancelled;
+ default:
+ return StatusCode::kUnknown;
+ }
+}
+
+namespace {
+TString MessageForErrnoToStatus(int error_number,
+ y_absl::string_view message) {
+ return y_absl::StrCat(message, ": ",
+ y_absl::base_internal::StrError(error_number));
+}
+} // namespace
+
+Status ErrnoToStatus(int error_number, y_absl::string_view message) {
+ return Status(ErrnoToStatusCode(error_number),
+ MessageForErrnoToStatus(error_number, message));
+}
+
+namespace status_internal {
+
+TString* MakeCheckFailString(const y_absl::Status* status,
+ const char* prefix) {
+ return new TString(
+ y_absl::StrCat(prefix, " (",
+ status->ToString(StatusToStringMode::kWithEverything), ")"));
+}
+
+} // namespace status_internal
+
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
index b2acacc709..a5705cec46 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
@@ -24,11 +24,11 @@
// * A set of helper functions for creating status codes and checking their
// values
//
-// Within Google, `y_absl::Status` is the primary mechanism for gracefully
-// handling errors across API boundaries (and in particular across RPC
-// boundaries). Some of these errors may be recoverable, but others may not.
-// Most functions that can produce a recoverable error should be designed to
-// return an `y_absl::Status` (or `y_absl::StatusOr`).
+// Within Google, `y_absl::Status` is the primary mechanism for communicating
+// errors in C++, and is used to represent error state in both in-process
+// library calls as well as RPC calls. Some of these errors may be recoverable,
+// but others may not. Most functions that can produce a recoverable error
+// should be designed to return an `y_absl::Status` (or `y_absl::StatusOr`).
//
// Example:
//
@@ -469,8 +469,9 @@ class Status final {
// Status::ok()
//
- // Returns `true` if `this->ok()`. Prefer checking for an OK status using this
- // member function.
+ // Returns `true` if `this->code()` == `y_absl::StatusCode::kOk`,
+ // indicating the absence of an error.
+ // Prefer checking for an OK status using this member function.
Y_ABSL_MUST_USE_RESULT bool ok() const;
// Status::code()
@@ -532,7 +533,7 @@ class Status final {
//----------------------------------------------------------------------------
// A payload may be attached to a status to provide additional context to an
- // error that may not be satisifed by an existing `y_absl::StatusCode`.
+ // error that may not be satisfied by an existing `y_absl::StatusCode`.
// Typically, this payload serves one of several purposes:
//
// * It may provide more fine-grained semantic information about the error
@@ -612,10 +613,6 @@ class Status final {
const status_internal::Payloads* GetPayloads() const;
status_internal::Payloads* GetPayloads();
- // Takes ownership of payload.
- static uintptr_t NewRep(
- y_absl::StatusCode code, y_absl::string_view msg,
- std::unique_ptr<status_internal::Payloads> payload);
static bool EqualsSlow(const y_absl::Status& a, const y_absl::Status& b);
// MSVC 14.0 limitation requires the const.
@@ -741,6 +738,19 @@ Status UnavailableError(y_absl::string_view message);
Status UnimplementedError(y_absl::string_view message);
Status UnknownError(y_absl::string_view message);
+// ErrnoToStatusCode()
+//
+// Returns the StatusCode for `error_number`, which should be an `errno` value.
+// See https://en.cppreference.com/w/cpp/error/errno_macros and similar
+// references.
+y_absl::StatusCode ErrnoToStatusCode(int error_number);
+
+// ErrnoToStatus()
+//
+// Convenience function that creates a `y_absl::Status` using an `error_number`,
+// which should be an `errno` value.
+Status ErrnoToStatus(int error_number, y_absl::string_view message);
+
//------------------------------------------------------------------------------
// Implementation details follow
//------------------------------------------------------------------------------
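
A small usage sketch for the errno helpers declared in the hunk above; `ReadConfig` and the file handling are hypothetical:

    #include <cerrno>
    #include <cstdio>

    #include "y_absl/status/status.h"

    // Hypothetical helper that turns a failed POSIX-style call into a Status.
    y_absl::Status ReadConfig(const char* path) {
      std::FILE* f = std::fopen(path, "r");
      if (f == nullptr) {
        // errno is mapped to a StatusCode (e.g. ENOENT -> kNotFound,
        // EACCES -> kPermissionDenied) and the message is prefixed.
        return y_absl::ErrnoToStatus(errno, "cannot open config file");
      }
      std::fclose(f);
      return y_absl::OkStatus();
    }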
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
index 6fd14e9819..428ca28a3b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
@@ -106,7 +106,13 @@ class BadStatusOrAccess : public std::exception {
// Returned StatusOr objects may not be ignored.
template <typename T>
+#if Y_ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+// TODO(b/176172494): Y_ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+class [[nodiscard]] StatusOr;
+#else
class Y_ABSL_MUST_USE_RESULT StatusOr;
+#endif // Y_ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
// y_absl::StatusOr<T>
//
@@ -156,8 +162,8 @@ class Y_ABSL_MUST_USE_RESULT StatusOr;
// A `y_absl::StatusOr<T*>` can be constructed from a null pointer like any other
// pointer value, and the result will be that `ok()` returns `true` and
// `value()` returns `nullptr`. Checking the value of pointer in an
-// `y_absl::StatusOr<T>` generally requires a bit more care, to ensure both that a
-// value is present and that value is not null:
+// `y_absl::StatusOr<T*>` generally requires a bit more care, to ensure both that
+// a value is present and that value is not null:
//
// StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
// if (!result.ok()) {
@@ -471,7 +477,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// StatusOr<T>::ok()
//
// Returns whether or not this `y_absl::StatusOr<T>` holds a `T` value. This
- // member function is analagous to `y_absl::Status::ok()` and should be used
+ // member function is analogous to `y_absl::Status::ok()` and should be used
// similarly to check the status of return values.
//
// Example:
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/CMakeLists.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/CMakeLists.txt
index c14b657da5..c855380674 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/CMakeLists.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/CMakeLists.txt
@@ -25,6 +25,8 @@ target_sources(abseil-cpp-tstring-y_absl-strings PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_parse.cc
@@ -33,6 +35,7 @@ target_sources(abseil-cpp-tstring-y_absl-strings PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_functions.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h
index 2050f8f1a9..02a6185ae6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.h
@@ -133,7 +133,7 @@ inline bool ascii_isdigit(unsigned char c) { return c >= '0' && c <= '9'; }
// ascii_isprint()
//
-// Determines whether the given character is printable, including whitespace.
+// Determines whether the given character is printable, including spaces.
inline bool ascii_isprint(unsigned char c) { return c >= 32 && c < 127; }
// ascii_isgraph()
@@ -197,7 +197,7 @@ Y_ABSL_MUST_USE_RESULT inline TString AsciiStrToUpper(y_absl::string_view s) {
Y_ABSL_MUST_USE_RESULT inline y_absl::string_view StripLeadingAsciiWhitespace(
y_absl::string_view str) {
auto it = std::find_if_not(str.begin(), str.end(), y_absl::ascii_isspace);
- return str.substr(it - str.begin());
+ return str.substr(static_cast<size_t>(it - str.begin()));
}
// Strips in place whitespace from the beginning of the given string.
@@ -211,13 +211,13 @@ inline void StripLeadingAsciiWhitespace(TString* str) {
Y_ABSL_MUST_USE_RESULT inline y_absl::string_view StripTrailingAsciiWhitespace(
y_absl::string_view str) {
auto it = std::find_if_not(str.rbegin(), str.rend(), y_absl::ascii_isspace);
- return str.substr(0, str.rend() - it);
+ return str.substr(0, static_cast<size_t>(str.rend() - it));
}
// Strips in place whitespace from the end of the given string
inline void StripTrailingAsciiWhitespace(TString* str) {
auto it = std::find_if_not(str->rbegin(), str->rend(), y_absl::ascii_isspace);
- str->erase(str->rend() - it);
+ str->erase(static_cast<size_t>(str->rend() - it));
}
// Returns y_absl::string_view with whitespace stripped from both ends of the
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
index 262a84036b..8559893616 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
@@ -34,9 +34,12 @@
#include "y_absl/base/port.h"
#include "y_absl/container/fixed_array.h"
#include "y_absl/container/inlined_vector.h"
+#include "y_absl/strings/cord_buffer.h"
#include "y_absl/strings/escaping.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_crc.h"
#include "y_absl/strings/internal/cord_rep_flat.h"
#include "y_absl/strings/internal/cordz_statistics.h"
#include "y_absl/strings/internal/cordz_update_scope.h"
@@ -52,7 +55,7 @@ Y_ABSL_NAMESPACE_BEGIN
using ::y_absl::cord_internal::CordRep;
using ::y_absl::cord_internal::CordRepBtree;
-using ::y_absl::cord_internal::CordRepConcat;
+using ::y_absl::cord_internal::CordRepCrc;
using ::y_absl::cord_internal::CordRepExternal;
using ::y_absl::cord_internal::CordRepFlat;
using ::y_absl::cord_internal::CordRepSubstring;
@@ -64,56 +67,6 @@ using ::y_absl::cord_internal::kMinFlatLength;
using ::y_absl::cord_internal::kInlinedVectorSize;
using ::y_absl::cord_internal::kMaxBytesToCopy;
-constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
- return n == 0 ? a : Fibonacci(n - 1, b, a + b);
-}
-
-static_assert(Fibonacci(63) == 6557470319842,
- "Fibonacci values computed incorrectly");
-
-// Minimum length required for a given depth tree -- a tree is considered
-// balanced if
-// length(t) >= min_length[depth(t)]
-// The root node depth is allowed to become twice as large to reduce rebalancing
-// for larger strings (see IsRootBalanced).
-static constexpr uint64_t min_length[] = {
- Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5),
- Fibonacci(6), Fibonacci(7), Fibonacci(8), Fibonacci(9),
- Fibonacci(10), Fibonacci(11), Fibonacci(12), Fibonacci(13),
- Fibonacci(14), Fibonacci(15), Fibonacci(16), Fibonacci(17),
- Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21),
- Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25),
- Fibonacci(26), Fibonacci(27), Fibonacci(28), Fibonacci(29),
- Fibonacci(30), Fibonacci(31), Fibonacci(32), Fibonacci(33),
- Fibonacci(34), Fibonacci(35), Fibonacci(36), Fibonacci(37),
- Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41),
- Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45),
- Fibonacci(46), Fibonacci(47),
- 0xffffffffffffffffull, // Avoid overflow
-};
-
-static const int kMinLengthSize = Y_ABSL_ARRAYSIZE(min_length);
-
-static inline bool btree_enabled() {
- return cord_internal::cord_btree_enabled.load(
- std::memory_order_relaxed);
-}
-
-static inline bool IsRootBalanced(CordRep* node) {
- if (!node->IsConcat()) {
- return true;
- } else if (node->concat()->depth() <= 15) {
- return true;
- } else if (node->concat()->depth() > kMinLengthSize) {
- return false;
- } else {
- // Allow depth to become twice as large as implied by fibonacci rule to
- // reduce rebalancing for larger strings.
- return (node->length >= min_length[node->concat()->depth() / 2]);
- }
-}
-
-static CordRep* Rebalance(CordRep* node);
static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
int indent = 0);
static bool VerifyNode(CordRep* root, CordRep* start_node,
@@ -135,75 +88,6 @@ static inline CordRep* VerifyTree(CordRep* node) {
return node;
}
-// Return the depth of a node
-static int Depth(const CordRep* rep) {
- if (rep->IsConcat()) {
- return rep->concat()->depth();
- } else {
- return 0;
- }
-}
-
-static void SetConcatChildren(CordRepConcat* concat, CordRep* left,
- CordRep* right) {
- concat->left = left;
- concat->right = right;
-
- concat->length = left->length + right->length;
- concat->set_depth(1 + std::max(Depth(left), Depth(right)));
-}
-
-// Create a concatenation of the specified nodes.
-// Does not change the refcounts of "left" and "right".
-// The returned node has a refcount of 1.
-static CordRep* RawConcat(CordRep* left, CordRep* right) {
- // Avoid making degenerate concat nodes (one child is empty)
- if (left == nullptr) return right;
- if (right == nullptr) return left;
- if (left->length == 0) {
- CordRep::Unref(left);
- return right;
- }
- if (right->length == 0) {
- CordRep::Unref(right);
- return left;
- }
-
- CordRepConcat* rep = new CordRepConcat();
- rep->tag = cord_internal::CONCAT;
- SetConcatChildren(rep, left, right);
-
- return rep;
-}
-
-static CordRep* Concat(CordRep* left, CordRep* right) {
- CordRep* rep = RawConcat(left, right);
- if (rep != nullptr && !IsRootBalanced(rep)) {
- rep = Rebalance(rep);
- }
- return VerifyTree(rep);
-}
-
-// Make a balanced tree out of an array of leaf nodes.
-static CordRep* MakeBalancedTree(CordRep** reps, size_t n) {
- // Make repeated passes over the array, merging adjacent pairs
- // until we are left with just a single node.
- while (n > 1) {
- size_t dst = 0;
- for (size_t src = 0; src < n; src += 2) {
- if (src + 1 < n) {
- reps[dst] = Concat(reps[src], reps[src + 1]);
- } else {
- reps[dst] = reps[src];
- }
- dst++;
- }
- n = dst;
- }
-
- return reps[0];
-}
-
static CordRepFlat* CreateFlat(const char* data, size_t length,
size_t alloc_hint) {
CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
@@ -229,21 +113,7 @@ static CordRep* NewBtree(const char* data, size_t length, size_t alloc_hint) {
// The returned node has a refcount of 1.
static CordRep* NewTree(const char* data, size_t length, size_t alloc_hint) {
if (length == 0) return nullptr;
- if (btree_enabled()) {
- return NewBtree(data, length, alloc_hint);
- }
- y_absl::FixedArray<CordRep*> reps((length - 1) / kMaxFlatLength + 1);
- size_t n = 0;
- do {
- const size_t len = std::min(length, kMaxFlatLength);
- CordRepFlat* rep = CordRepFlat::New(len + alloc_hint);
- rep->length = len;
- memcpy(rep->Data(), data, len);
- reps[n++] = VerifyTree(rep);
- data += len;
- length -= len;
- } while (length != 0);
- return MakeBalancedTree(reps.data(), n);
+ return NewBtree(data, length, alloc_hint);
}
namespace cord_internal {
@@ -258,22 +128,6 @@ void InitializeCordRepExternal(y_absl::string_view data, CordRepExternal* rep) {
} // namespace cord_internal
-static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) {
- // Never create empty substring nodes
- if (length == 0) {
- CordRep::Unref(child);
- return nullptr;
- } else {
- CordRepSubstring* rep = new CordRepSubstring();
- assert((offset + length) <= child->length);
- rep->length = length;
- rep->tag = cord_internal::SUBSTRING;
- rep->start = offset;
- rep->child = child;
- return VerifyTree(rep);
- }
-}
-
// Creates a CordRep from the provided string. If the string is large enough,
// and not wasteful, we move the string into an external cord rep, preserving
// the already allocated string contents.
@@ -306,13 +160,14 @@ static CordRep* CordRepFromString(TString&& src) {
// --------------------------------------------------------------------
// Cord::InlineRep functions
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr unsigned char Cord::InlineRep::kMaxInline;
+#endif
-inline void Cord::InlineRep::set_data(const char* data, size_t n,
- bool nullify_tail) {
+inline void Cord::InlineRep::set_data(const char* data, size_t n) {
static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
- cord_internal::SmallMemmove(data_.as_chars(), data, n, nullify_tail);
+ cord_internal::SmallMemmove<true>(data_.as_chars(), data, n);
set_inline_size(n);
}
@@ -341,7 +196,9 @@ inline void Cord::InlineRep::remove_prefix(size_t n) {
// Returns `rep` converted into a CordRepBtree.
// Directly returns `rep` if `rep` is already a CordRepBtree.
static CordRepBtree* ForceBtree(CordRep* rep) {
- return rep->IsBtree() ? rep->btree() : CordRepBtree::Create(rep);
+ return rep->IsBtree()
+ ? rep->btree()
+ : CordRepBtree::Create(cord_internal::RemoveCrcNode(rep));
}
void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
@@ -349,11 +206,7 @@ void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
assert(!is_tree());
if (!data_.is_empty()) {
CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
- if (btree_enabled()) {
- tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
- } else {
- tree = Concat(flat, tree);
- }
+ tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
}
EmplaceTree(tree, method);
}
@@ -361,16 +214,14 @@ void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
void Cord::InlineRep::AppendTreeToTree(CordRep* tree, MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
- if (btree_enabled()) {
- tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
- } else {
- tree = Concat(data_.as_tree(), tree);
- }
+ tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
SetTree(tree, scope);
}
void Cord::InlineRep::AppendTree(CordRep* tree, MethodIdentifier method) {
- if (tree == nullptr) return;
+ assert(tree != nullptr);
+ assert(tree->length != 0);
+ assert(!tree->IsCrc());
if (data_.is_tree()) {
AppendTreeToTree(tree, method);
} else {
@@ -383,11 +234,7 @@ void Cord::InlineRep::PrependTreeToInlined(CordRep* tree,
assert(!is_tree());
if (!data_.is_empty()) {
CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
- if (btree_enabled()) {
- tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
- } else {
- tree = Concat(tree, flat);
- }
+ tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
}
EmplaceTree(tree, method);
}
@@ -396,16 +243,14 @@ void Cord::InlineRep::PrependTreeToTree(CordRep* tree,
MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
- if (btree_enabled()) {
- tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
- } else {
- tree = Concat(tree, data_.as_tree());
- }
+ tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
SetTree(tree, scope);
}
void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) {
assert(tree != nullptr);
+ assert(tree->length != 0);
+ assert(!tree->IsCrc());
if (data_.is_tree()) {
PrependTreeToTree(tree, method);
} else {
@@ -419,7 +264,7 @@ void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) {
// written to region and the actual size increase will be written to size.
static inline bool PrepareAppendRegion(CordRep* root, char** region,
size_t* size, size_t max_length) {
- if (root->IsBtree() && root->refcount.IsMutable()) {
+ if (root->IsBtree() && root->refcount.IsOne()) {
Span<char> span = root->btree()->GetAppendBuffer(max_length);
if (!span.empty()) {
*region = span.data();
@@ -428,13 +273,8 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
}
}
- // Search down the right-hand path for a non-full FLAT node.
CordRep* dst = root;
- while (dst->IsConcat() && dst->refcount.IsMutable()) {
- dst = dst->concat()->right;
- }
-
- if (!dst->IsFlat() || !dst->refcount.IsMutable()) {
+ if (!dst->IsFlat() || !dst->refcount.IsOne()) {
*region = nullptr;
*size = 0;
return false;
@@ -448,12 +288,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
return false;
}
- size_t size_increase = std::min(capacity - in_use, max_length);
-
- // We need to update the length fields for all nodes, including the leaf node.
- for (CordRep* rep = root; rep != dst; rep = rep->concat()->right) {
- rep->length += size_increase;
- }
+ const size_t size_increase = std::min(capacity - in_use, max_length);
dst->length += size_increase;
*region = dst->flat()->Data() + in_use;
@@ -461,90 +296,6 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
return true;
}
-template <bool has_length>
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
- size_t length) {
- auto constexpr method = CordzUpdateTracker::kGetAppendRegion;
-
- CordRep* root = tree();
- size_t sz = root ? root->length : inline_size();
- if (root == nullptr) {
- size_t available = kMaxInline - sz;
- if (available >= (has_length ? length : 1)) {
- *region = data_.as_chars() + sz;
- *size = has_length ? length : available;
- set_inline_size(has_length ? sz + length : kMaxInline);
- return;
- }
- }
-
- size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
- CordRep* rep = root ? root : MakeFlatWithExtraCapacity(extra);
- CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method);
- if (PrepareAppendRegion(rep, region, size, length)) {
- CommitTree(root, rep, scope, method);
- return;
- }
-
- // Allocate new node.
- CordRepFlat* new_node = CordRepFlat::New(extra);
- new_node->length = std::min(new_node->Capacity(), length);
- *region = new_node->Data();
- *size = new_node->length;
-
- if (btree_enabled()) {
- rep = CordRepBtree::Append(ForceBtree(rep), new_node);
- } else {
- rep = Concat(rep, new_node);
- }
- CommitTree(root, rep, scope, method);
-}
-
-// Computes the memory side of the provided edge which must be a valid data edge
-// for a btrtee, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL node.
-static bool RepMemoryUsageDataEdge(const CordRep* rep,
- size_t* total_mem_usage) {
- size_t maybe_sub_size = 0;
- if (Y_ABSL_PREDICT_FALSE(rep->IsSubstring())) {
- maybe_sub_size = sizeof(cord_internal::CordRepSubstring);
- rep = rep->substring()->child;
- }
- if (rep->IsFlat()) {
- *total_mem_usage += maybe_sub_size + rep->flat()->AllocatedSize();
- return true;
- }
- if (rep->IsExternal()) {
- // We don't know anything about the embedded / bound data, but we can safely
- // assume it is 'at least' a word / pointer to data. In the future we may
- // choose to use the 'data' byte as a tag to identify the types of some
- // well-known externals, such as a TString instance.
- *total_mem_usage += maybe_sub_size +
- sizeof(cord_internal::CordRepExternalImpl<intptr_t>) +
- rep->length;
- return true;
- }
- return false;
-}
-
-// If the rep is a leaf, this will increment the value at total_mem_usage and
-// will return true.
-static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) {
- if (rep->IsFlat()) {
- *total_mem_usage += rep->flat()->AllocatedSize();
- return true;
- }
- if (rep->IsExternal()) {
- // We don't know anything about the embedded / bound data, but we can safely
- // assume it is 'at least' a word / pointer to data. In the future we may
- // choose to use the 'data' byte as a tag to identify the types of some
- // well-known externals, such as a TString instance.
- *total_mem_usage +=
- sizeof(cord_internal::CordRepExternalImpl<intptr_t>) + rep->length;
- return true;
- }
- return false;
-}
-
void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
assert(&src != this);
assert(is_tree() || src.is_tree());
@@ -581,7 +332,7 @@ Cord::Cord(y_absl::string_view src, MethodIdentifier method)
: contents_(InlineData::kDefaultInit) {
const size_t n = src.size();
if (n <= InlineRep::kMaxInline) {
- contents_.set_data(src.data(), n, true);
+ contents_.set_data(src.data(), n);
} else {
CordRep* rep = NewTree(src.data(), n, 0);
contents_.EmplaceTree(rep, method);
@@ -591,7 +342,7 @@ Cord::Cord(y_absl::string_view src, MethodIdentifier method)
template <typename T, Cord::EnableIfString<T>>
Cord::Cord(T&& src) : contents_(InlineData::kDefaultInit) {
if (src.size() <= InlineRep::kMaxInline) {
- contents_.set_data(src.data(), src.size(), true);
+ contents_.set_data(src.data(), src.size());
} else {
CordRep* rep = CordRepFromString(std::forward<T>(src));
contents_.EmplaceTree(rep, CordzUpdateTracker::kConstructorString);
@@ -642,14 +393,14 @@ Cord& Cord::operator=(y_absl::string_view src) {
// - MaybeUntrackCord must be called before set_data() clobbers cordz_info.
// - set_data() must be called before Unref(tree) as it may reference tree.
if (tree != nullptr) CordzInfo::MaybeUntrackCord(contents_.cordz_info());
- contents_.set_data(data, length, true);
+ contents_.set_data(data, length);
if (tree != nullptr) CordRep::Unref(tree);
return *this;
}
if (tree != nullptr) {
CordzUpdateScope scope(contents_.cordz_info(), method);
if (tree->IsFlat() && tree->flat()->Capacity() >= length &&
- tree->refcount.IsMutable()) {
+ tree->refcount.IsOne()) {
// Copy in place if the existing FLAT node is reusable.
memmove(tree->flat()->Data(), data, length);
tree->length = length;
@@ -675,6 +426,7 @@ void Cord::InlineRep::AppendArray(y_absl::string_view src,
const CordRep* const root = rep;
CordzUpdateScope scope(root ? cordz_info() : nullptr, method);
if (root != nullptr) {
+ rep = cord_internal::RemoveCrcNode(rep);
char* region;
if (PrepareAppendRegion(rep, &region, &appended, src.size())) {
memcpy(region, src.data(), appended);
@@ -705,27 +457,11 @@ void Cord::InlineRep::AppendArray(y_absl::string_view src,
return;
}
- if (btree_enabled()) {
- // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
- rep = ForceBtree(rep);
- const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
- rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
- } else {
- // Use new block(s) for any remaining bytes that were not handled above.
- // Alloc extra memory only if the right child of the root of the new tree
- // is going to be a FLAT node, which will permit further inplace appends.
- size_t length = src.size();
- if (src.size() < kMaxFlatLength) {
- // The new length is either
- // - old size + 10%
- // - old_size + src.size()
- // This will cause a reasonable conservative step-up in size that is
- // still large enough to avoid excessive amounts of small fragments
- // being added.
- length = std::max<size_t>(rep->length / 10, src.size());
- }
- rep = Concat(rep, NewTree(src.data(), src.size(), length - src.size()));
- }
+ // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
+ rep = ForceBtree(rep);
+ const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
+ rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
+
CommitTree(root, rep, scope, method);
}
@@ -746,7 +482,8 @@ inline void Cord::AppendImpl(C&& src) {
// Since destination is empty, we can avoid allocating a node,
if (src.contents_.is_tree()) {
// by taking the tree directly
- CordRep* rep = std::forward<C>(src).TakeRep();
+ CordRep* rep =
+ cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
contents_.EmplaceTree(rep, method);
} else {
// or copying over inline data
@@ -782,10 +519,50 @@ inline void Cord::AppendImpl(C&& src) {
}
// Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
- CordRep* rep = std::forward<C>(src).TakeRep();
+ CordRep* rep = cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
}
+static CordRep::ExtractResult ExtractAppendBuffer(CordRep* rep,
+ size_t min_capacity) {
+ switch (rep->tag) {
+ case cord_internal::BTREE:
+ return CordRepBtree::ExtractAppendBuffer(rep->btree(), min_capacity);
+ default:
+ if (rep->IsFlat() && rep->refcount.IsOne() &&
+ rep->flat()->Capacity() - rep->length >= min_capacity) {
+ return {nullptr, rep};
+ }
+ return {rep, nullptr};
+ }
+}
+
+static CordBuffer CreateAppendBuffer(InlineData& data, size_t capacity) {
+ // Watch out for overflow, people can ask for size_t::max().
+ const size_t size = data.inline_size();
+ capacity = (std::min)(std::numeric_limits<size_t>::max() - size, capacity);
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(size + capacity);
+ cord_internal::SmallMemmove(buffer.data(), data.as_chars(), size);
+ buffer.SetLength(size);
+ data = {};
+ return buffer;
+}
+
+CordBuffer Cord::GetAppendBufferSlowPath(size_t capacity, size_t min_capacity) {
+ auto constexpr method = CordzUpdateTracker::kGetAppendBuffer;
+ CordRep* tree = contents_.tree();
+ if (tree != nullptr) {
+ CordzUpdateScope scope(contents_.cordz_info(), method);
+ CordRep::ExtractResult result = ExtractAppendBuffer(tree, min_capacity);
+ if (result.extracted != nullptr) {
+ contents_.SetTreeOrEmpty(result.tree, scope);
+ return CordBuffer(result.extracted->flat());
+ }
+ return CordBuffer::CreateWithDefaultLimit(capacity);
+ }
+ return CreateAppendBuffer(contents_.data_, capacity);
+}
+
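Editor's sketch (not part of the patch): the round trip this slow path enables, written only with the CordBuffer and Cord calls that appear elsewhere in this diff (`data()`, `length()`, `SetLength()`, `Append(CordBuffer)`); the write size is assumed to fit the buffer's spare capacity.
//   y_absl::Cord cord("existing tail");
//   y_absl::CordBuffer buf = cord.GetAppendBuffer(/*capacity=*/512);
//   size_t old_length = buf.length();            // non-zero if private tail data was extracted
//   memcpy(buf.data() + old_length, "abc", 3);    // assumption: 3 bytes fit the spare capacity
//   buf.SetLength(old_length + 3);
//   cord.Append(std::move(buf));                  // re-attaches the extracted data plus the new bytes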
void Cord::Append(const Cord& src) {
AppendImpl(src);
}
@@ -810,7 +587,8 @@ void Cord::Prepend(const Cord& src) {
CordRep* src_tree = src.contents_.tree();
if (src_tree != nullptr) {
CordRep::Ref(src_tree);
- contents_.PrependTree(src_tree, CordzUpdateTracker::kPrependCord);
+ contents_.PrependTree(cord_internal::RemoveCrcNode(src_tree),
+ CordzUpdateTracker::kPrependCord);
return;
}
@@ -837,103 +615,45 @@ void Cord::PrependArray(y_absl::string_view src, MethodIdentifier method) {
contents_.PrependTree(rep, method);
}
-template <typename T, Cord::EnableIfString<T>>
-inline void Cord::Prepend(T&& src) {
- if (src.size() <= kMaxBytesToCopy) {
- Prepend(y_absl::string_view(src));
+void Cord::AppendPrecise(y_absl::string_view src, MethodIdentifier method) {
+ assert(!src.empty());
+ assert(src.size() <= cord_internal::kMaxFlatLength);
+ if (contents_.remaining_inline_capacity() >= src.size()) {
+ const size_t inline_length = contents_.inline_size();
+ memcpy(contents_.data_.as_chars() + inline_length, src.data(), src.size());
+ contents_.set_inline_size(inline_length + src.size());
} else {
- CordRep* rep = CordRepFromString(std::forward<T>(src));
- contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
+ contents_.AppendTree(CordRepFlat::Create(src), method);
}
}
-template void Cord::Prepend(TString&& src);
-
-static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
- if (n >= node->length) return nullptr;
- if (n == 0) return CordRep::Ref(node);
- y_absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack;
-
- while (node->IsConcat()) {
- assert(n <= node->length);
- if (n < node->concat()->left->length) {
- // Push right to stack, descend left.
- rhs_stack.push_back(node->concat()->right);
- node = node->concat()->left;
- } else {
- // Drop left, descend right.
- n -= node->concat()->left->length;
- node = node->concat()->right;
- }
- }
- assert(n <= node->length);
-
- if (n == 0) {
- CordRep::Ref(node);
+void Cord::PrependPrecise(y_absl::string_view src, MethodIdentifier method) {
+ assert(!src.empty());
+ assert(src.size() <= cord_internal::kMaxFlatLength);
+ if (contents_.remaining_inline_capacity() >= src.size()) {
+ const size_t inline_length = contents_.inline_size();
+ char data[InlineRep::kMaxInline + 1] = {0};
+ memcpy(data, src.data(), src.size());
+ memcpy(data + src.size(), contents_.data(), inline_length);
+ memcpy(contents_.data_.as_chars(), data, InlineRep::kMaxInline + 1);
+ contents_.set_inline_size(inline_length + src.size());
} else {
- size_t start = n;
- size_t len = node->length - n;
- if (node->IsSubstring()) {
- // Consider in-place update of node, similar to in RemoveSuffixFrom().
- start += node->substring()->start;
- node = node->substring()->child;
- }
- node = NewSubstring(CordRep::Ref(node), start, len);
- }
- while (!rhs_stack.empty()) {
- node = Concat(node, CordRep::Ref(rhs_stack.back()));
- rhs_stack.pop_back();
+ contents_.PrependTree(CordRepFlat::Create(src), method);
}
- return node;
}
-// RemoveSuffixFrom() is very similar to RemovePrefixFrom(), with the
-// exception that removing a suffix has an optimization where a node may be
-// edited in place iff that node and all its ancestors have a refcount of 1.
-static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
- if (n >= node->length) return nullptr;
- if (n == 0) return CordRep::Ref(node);
- y_absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack;
- bool inplace_ok = node->refcount.IsMutable();
-
- while (node->IsConcat()) {
- assert(n <= node->length);
- if (n < node->concat()->right->length) {
- // Push left to stack, descend right.
- lhs_stack.push_back(node->concat()->left);
- node = node->concat()->right;
- } else {
- // Drop right, descend left.
- n -= node->concat()->right->length;
- node = node->concat()->left;
- }
- inplace_ok = inplace_ok && node->refcount.IsMutable();
- }
- assert(n <= node->length);
-
- if (n == 0) {
- CordRep::Ref(node);
- } else if (inplace_ok && !node->IsExternal()) {
- // Consider making a new buffer if the current node capacity is much
- // larger than the new length.
- CordRep::Ref(node);
- node->length -= n;
+template <typename T, Cord::EnableIfString<T>>
+inline void Cord::Prepend(T&& src) {
+ if (src.size() <= kMaxBytesToCopy) {
+ Prepend(y_absl::string_view(src));
} else {
- size_t start = 0;
- size_t len = node->length - n;
- if (node->IsSubstring()) {
- start = node->substring()->start;
- node = node->substring()->child;
- }
- node = NewSubstring(CordRep::Ref(node), start, len);
- }
- while (!lhs_stack.empty()) {
- node = Concat(CordRep::Ref(lhs_stack.back()), node);
- lhs_stack.pop_back();
+ CordRep* rep = CordRepFromString(std::forward<T>(src));
+ contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
}
- return node;
}
+template void Cord::Prepend(TString&& src);
+
void Cord::RemovePrefix(size_t n) {
Y_ABSL_INTERNAL_CHECK(n <= size(),
y_absl::StrCat("Requested prefix size ", n,
@@ -944,14 +664,21 @@ void Cord::RemovePrefix(size_t n) {
} else {
auto constexpr method = CordzUpdateTracker::kRemovePrefix;
CordzUpdateScope scope(contents_.cordz_info(), method);
- if (tree->IsBtree()) {
+ tree = cord_internal::RemoveCrcNode(tree);
+ if (n >= tree->length) {
+ CordRep::Unref(tree);
+ tree = nullptr;
+ } else if (tree->IsBtree()) {
CordRep* old = tree;
tree = tree->btree()->SubTree(n, tree->length - n);
CordRep::Unref(old);
+ } else if (tree->IsSubstring() && tree->refcount.IsOne()) {
+ tree->substring()->start += n;
+ tree->length -= n;
} else {
- CordRep* newrep = RemovePrefixFrom(tree, n);
+ CordRep* rep = CordRepSubstring::Substring(tree, n, tree->length - n);
CordRep::Unref(tree);
- tree = VerifyTree(newrep);
+ tree = rep;
}
contents_.SetTreeOrEmpty(tree, scope);
}
@@ -967,68 +694,24 @@ void Cord::RemoveSuffix(size_t n) {
} else {
auto constexpr method = CordzUpdateTracker::kRemoveSuffix;
CordzUpdateScope scope(contents_.cordz_info(), method);
- if (tree->IsBtree()) {
+ tree = cord_internal::RemoveCrcNode(tree);
+ if (n >= tree->length) {
+ CordRep::Unref(tree);
+ tree = nullptr;
+ } else if (tree->IsBtree()) {
tree = CordRepBtree::RemoveSuffix(tree->btree(), n);
+ } else if (!tree->IsExternal() && tree->refcount.IsOne()) {
+ assert(tree->IsFlat() || tree->IsSubstring());
+ tree->length -= n;
} else {
- CordRep* newrep = RemoveSuffixFrom(tree, n);
+ CordRep* rep = CordRepSubstring::Substring(tree, 0, tree->length - n);
CordRep::Unref(tree);
- tree = VerifyTree(newrep);
+ tree = rep;
}
contents_.SetTreeOrEmpty(tree, scope);
}
}
-// Work item for NewSubRange().
-struct SubRange {
- SubRange(CordRep* a_node, size_t a_pos, size_t a_n)
- : node(a_node), pos(a_pos), n(a_n) {}
- CordRep* node; // nullptr means concat last 2 results.
- size_t pos;
- size_t n;
-};
-
-static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) {
- y_absl::InlinedVector<CordRep*, kInlinedVectorSize> results;
- y_absl::InlinedVector<SubRange, kInlinedVectorSize> todo;
- todo.push_back(SubRange(node, pos, n));
- do {
- const SubRange& sr = todo.back();
- node = sr.node;
- pos = sr.pos;
- n = sr.n;
- todo.pop_back();
-
- if (node == nullptr) {
- assert(results.size() >= 2);
- CordRep* right = results.back();
- results.pop_back();
- CordRep* left = results.back();
- results.pop_back();
- results.push_back(Concat(left, right));
- } else if (pos == 0 && n == node->length) {
- results.push_back(CordRep::Ref(node));
- } else if (!node->IsConcat()) {
- if (node->IsSubstring()) {
- pos += node->substring()->start;
- node = node->substring()->child;
- }
- results.push_back(NewSubstring(CordRep::Ref(node), pos, n));
- } else if (pos + n <= node->concat()->left->length) {
- todo.push_back(SubRange(node->concat()->left, pos, n));
- } else if (pos >= node->concat()->left->length) {
- pos -= node->concat()->left->length;
- todo.push_back(SubRange(node->concat()->right, pos, n));
- } else {
- size_t left_n = node->concat()->left->length - pos;
- todo.push_back(SubRange(nullptr, 0, 0)); // Concat()
- todo.push_back(SubRange(node->concat()->right, 0, n - left_n));
- todo.push_back(SubRange(node->concat()->left, pos, left_n));
- }
- } while (!todo.empty());
- assert(results.size() == 1);
- return results[0];
-}
-
Cord Cord::Subcord(size_t pos, size_t new_size) const {
Cord sub_cord;
size_t length = size();
@@ -1038,9 +721,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
CordRep* tree = contents_.tree();
if (tree == nullptr) {
- // sub_cord is newly constructed, no need to re-zero-out the tail of
- // contents_ memory.
- sub_cord.contents_.set_data(contents_.data() + pos, new_size, false);
+ sub_cord.contents_.set_data(contents_.data() + pos, new_size);
return sub_cord;
}
@@ -1060,10 +741,11 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
return sub_cord;
}
+ tree = cord_internal::SkipCrcNode(tree);
if (tree->IsBtree()) {
tree = tree->btree()->SubTree(pos, new_size);
} else {
- tree = NewSubRange(tree, pos, new_size);
+ tree = CordRepSubstring::Substring(tree, pos, new_size);
}
sub_cord.contents_.EmplaceTree(tree, contents_.data_,
CordzUpdateTracker::kSubCord);
@@ -1071,146 +753,6 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
}
// --------------------------------------------------------------------
-// Balancing
-
-class CordForest {
- public:
- explicit CordForest(size_t length)
- : root_length_(length), trees_(kMinLengthSize, nullptr) {}
-
- void Build(CordRep* cord_root) {
- std::vector<CordRep*> pending = {cord_root};
-
- while (!pending.empty()) {
- CordRep* node = pending.back();
- pending.pop_back();
- CheckNode(node);
- if (Y_ABSL_PREDICT_FALSE(!node->IsConcat())) {
- AddNode(node);
- continue;
- }
-
- CordRepConcat* concat_node = node->concat();
- if (concat_node->depth() >= kMinLengthSize ||
- concat_node->length < min_length[concat_node->depth()]) {
- pending.push_back(concat_node->right);
- pending.push_back(concat_node->left);
-
- if (concat_node->refcount.IsOne()) {
- concat_node->left = concat_freelist_;
- concat_freelist_ = concat_node;
- } else {
- CordRep::Ref(concat_node->right);
- CordRep::Ref(concat_node->left);
- CordRep::Unref(concat_node);
- }
- } else {
- AddNode(node);
- }
- }
- }
-
- CordRep* ConcatNodes() {
- CordRep* sum = nullptr;
- for (auto* node : trees_) {
- if (node == nullptr) continue;
-
- sum = PrependNode(node, sum);
- root_length_ -= node->length;
- if (root_length_ == 0) break;
- }
- Y_ABSL_INTERNAL_CHECK(sum != nullptr, "Failed to locate sum node");
- return VerifyTree(sum);
- }
-
- private:
- CordRep* AppendNode(CordRep* node, CordRep* sum) {
- return (sum == nullptr) ? node : MakeConcat(sum, node);
- }
-
- CordRep* PrependNode(CordRep* node, CordRep* sum) {
- return (sum == nullptr) ? node : MakeConcat(node, sum);
- }
-
- void AddNode(CordRep* node) {
- CordRep* sum = nullptr;
-
- // Collect together everything with which we will merge with node
- int i = 0;
- for (; node->length > min_length[i + 1]; ++i) {
- auto& tree_at_i = trees_[i];
-
- if (tree_at_i == nullptr) continue;
- sum = PrependNode(tree_at_i, sum);
- tree_at_i = nullptr;
- }
-
- sum = AppendNode(node, sum);
-
- // Insert sum into appropriate place in the forest
- for (; sum->length >= min_length[i]; ++i) {
- auto& tree_at_i = trees_[i];
- if (tree_at_i == nullptr) continue;
-
- sum = MakeConcat(tree_at_i, sum);
- tree_at_i = nullptr;
- }
-
- // min_length[0] == 1, which means sum->length >= min_length[0]
- assert(i > 0);
- trees_[i - 1] = sum;
- }
-
- // Make concat node trying to resue existing CordRepConcat nodes we
- // already collected in the concat_freelist_.
- CordRep* MakeConcat(CordRep* left, CordRep* right) {
- if (concat_freelist_ == nullptr) return RawConcat(left, right);
-
- CordRepConcat* rep = concat_freelist_;
- if (concat_freelist_->left == nullptr) {
- concat_freelist_ = nullptr;
- } else {
- concat_freelist_ = concat_freelist_->left->concat();
- }
- SetConcatChildren(rep, left, right);
-
- return rep;
- }
-
- static void CheckNode(CordRep* node) {
- Y_ABSL_INTERNAL_CHECK(node->length != 0u, "");
- if (node->IsConcat()) {
- Y_ABSL_INTERNAL_CHECK(node->concat()->left != nullptr, "");
- Y_ABSL_INTERNAL_CHECK(node->concat()->right != nullptr, "");
- Y_ABSL_INTERNAL_CHECK(node->length == (node->concat()->left->length +
- node->concat()->right->length),
- "");
- }
- }
-
- size_t root_length_;
-
- // use an inlined vector instead of a flat array to get bounds checking
- y_absl::InlinedVector<CordRep*, kInlinedVectorSize> trees_;
-
- // List of concat nodes we can re-use for Cord balancing.
- CordRepConcat* concat_freelist_ = nullptr;
-};
-
-static CordRep* Rebalance(CordRep* node) {
- VerifyTree(node);
- assert(node->IsConcat());
-
- if (node->length == 0) {
- return nullptr;
- }
-
- CordForest forest(node->length);
- forest.Build(node);
- return forest.ConcatNodes();
-}
-
-// --------------------------------------------------------------------
// Comparators
namespace {
@@ -1256,7 +798,7 @@ inline y_absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return y_absl::string_view(data_.as_chars(), data_.inline_size());
}
- CordRep* node = tree();
+ CordRep* node = cord_internal::SkipCrcNode(tree());
if (node->IsFlat()) {
return y_absl::string_view(node->flat()->Data(), node->length);
}
@@ -1274,11 +816,6 @@ inline y_absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return tree->Data(tree->begin());
}
- // Walk down the left branches until we hit a non-CONCAT node.
- while (node->IsConcat()) {
- node = node->concat()->left;
- }
-
// Get the child node if we encounter a SUBSTRING.
size_t offset = 0;
size_t length = node->length;
@@ -1298,6 +835,28 @@ inline y_absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return y_absl::string_view(node->external()->base + offset, length);
}
+void Cord::SetExpectedChecksum(uint32_t crc) {
+ auto constexpr method = CordzUpdateTracker::kSetExpectedChecksum;
+ if (empty()) return;
+
+ if (!contents_.is_tree()) {
+ CordRep* rep = contents_.MakeFlatWithExtraCapacity(0);
+ rep = CordRepCrc::New(rep, crc);
+ contents_.EmplaceTree(rep, method);
+ } else {
+ const CordzUpdateScope scope(contents_.data_.cordz_info(), method);
+ CordRep* rep = CordRepCrc::New(contents_.data_.as_tree(), crc);
+ contents_.SetTree(rep, scope);
+ }
+}
+
+y_absl::optional<uint32_t> Cord::ExpectedChecksum() const {
+ if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
+ return y_absl::nullopt;
+ }
+ return contents_.tree()->crc()->crc;
+}
+
inline int Cord::CompareSlowPath(y_absl::string_view rhs, size_t compared_size,
size_t size_to_compare) const {
auto advance = [](Cord::ChunkIterator* it, y_absl::string_view* chunk) {
@@ -1473,42 +1032,6 @@ void Cord::CopyToArraySlowPath(char* dst) const {
}
}
-Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
- auto& stack_of_right_children = stack_of_right_children_;
- if (stack_of_right_children.empty()) {
- assert(!current_chunk_.empty()); // Called on invalid iterator.
- // We have reached the end of the Cord.
- return *this;
- }
-
- // Process the next node on the stack.
- CordRep* node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
-
- // Walk down the left branches until we hit a non-CONCAT node. Save the
- // right children to the stack for subsequent traversal.
- while (node->IsConcat()) {
- stack_of_right_children.push_back(node->concat()->right);
- node = node->concat()->left;
- }
-
- // Get the child node if we encounter a SUBSTRING.
- size_t offset = 0;
- size_t length = node->length;
- if (node->IsSubstring()) {
- offset = node->substring()->start;
- node = node->substring()->child;
- }
-
- assert(node->IsExternal() || node->IsFlat());
- assert(length != 0);
- const char* data =
- node->IsExternal() ? node->external()->base : node->flat()->Data();
- current_chunk_ = y_absl::string_view(data + offset, length);
- current_leaf_ = node;
- return *this;
-}
-
Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
Y_ABSL_HARDENING_ASSERT(bytes_remaining_ >= n &&
"Attempted to iterate past `end()`");
@@ -1551,166 +1074,33 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
return subcord;
}
- auto& stack_of_right_children = stack_of_right_children_;
- if (n < current_chunk_.size()) {
- // Range to read is a proper subrange of the current chunk.
- assert(current_leaf_ != nullptr);
- CordRep* subnode = CordRep::Ref(current_leaf_);
- const char* data = subnode->IsExternal() ? subnode->external()->base
- : subnode->flat()->Data();
- subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
- subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
- RemoveChunkPrefix(n);
- return subcord;
- }
-
- // Range to read begins with a proper subrange of the current chunk.
- assert(!current_chunk_.empty());
+ // Short circuit if reading the entire data edge.
assert(current_leaf_ != nullptr);
- CordRep* subnode = CordRep::Ref(current_leaf_);
- if (current_chunk_.size() < subnode->length) {
- const char* data = subnode->IsExternal() ? subnode->external()->base
- : subnode->flat()->Data();
- subnode = NewSubstring(subnode, current_chunk_.data() - data,
- current_chunk_.size());
- }
- n -= current_chunk_.size();
- bytes_remaining_ -= current_chunk_.size();
-
- // Process the next node(s) on the stack, reading whole subtrees depending on
- // their length and how many bytes we are advancing.
- CordRep* node = nullptr;
- while (!stack_of_right_children.empty()) {
- node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
- if (node->length > n) break;
- // TODO(qrczak): This might unnecessarily recreate existing concat nodes.
- // Avoiding that would need pretty complicated logic (instead of
- // current_leaf, keep current_subtree_ which points to the highest node
- // such that the current leaf can be found on the path of left children
- // starting from current_subtree_; delay creating subnode while node is
- // below current_subtree_; find the proper node along the path of left
- // children starting from current_subtree_ if this loop exits while staying
- // below current_subtree_; etc.; alternatively, push parents instead of
- // right children on the stack).
- subnode = Concat(subnode, CordRep::Ref(node));
- n -= node->length;
- bytes_remaining_ -= node->length;
- node = nullptr;
- }
-
- if (node == nullptr) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ if (n == current_leaf_->length) {
+ bytes_remaining_ = 0;
+ current_chunk_ = {};
+ CordRep* tree = CordRep::Ref(current_leaf_);
+ subcord.contents_.EmplaceTree(VerifyTree(tree), method);
return subcord;
}
- // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
- // right children to the stack for subsequent traversal.
- while (node->IsConcat()) {
- if (node->concat()->left->length > n) {
- // Push right, descend left.
- stack_of_right_children.push_back(node->concat()->right);
- node = node->concat()->left;
- } else {
- // Read left, descend right.
- subnode = Concat(subnode, CordRep::Ref(node->concat()->left));
- n -= node->concat()->left->length;
- bytes_remaining_ -= node->concat()->left->length;
- node = node->concat()->right;
- }
- }
-
- // Get the child node if we encounter a SUBSTRING.
- size_t offset = 0;
- size_t length = node->length;
- if (node->IsSubstring()) {
- offset = node->substring()->start;
- node = node->substring()->child;
- }
+ // From this point on, we need a partial substring node.
+ // Get a pointer to the underlying flat or external data payload and
+ // compute the data pointer and offset of the current chunk within it.
+ CordRep* payload = current_leaf_->IsSubstring()
+ ? current_leaf_->substring()->child
+ : current_leaf_;
+ const char* data = payload->IsExternal() ? payload->external()->base
+ : payload->flat()->Data();
+ const size_t offset = current_chunk_.data() - data;
- // Range to read ends with a proper (possibly empty) subrange of the current
- // chunk.
- assert(node->IsExternal() || node->IsFlat());
- assert(length > n);
- if (n > 0) {
- subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
- }
- const char* data =
- node->IsExternal() ? node->external()->base : node->flat()->Data();
- current_chunk_ = y_absl::string_view(data + offset + n, length - n);
- current_leaf_ = node;
+ auto* tree = CordRepSubstring::Substring(payload, offset, n);
+ subcord.contents_.EmplaceTree(VerifyTree(tree), method);
bytes_remaining_ -= n;
- subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ current_chunk_.remove_prefix(n);
return subcord;
}
-void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
- assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`");
- assert(n >= current_chunk_.size()); // This should only be called when
- // iterating to a new node.
-
- n -= current_chunk_.size();
- bytes_remaining_ -= current_chunk_.size();
-
- if (stack_of_right_children_.empty()) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- return;
- }
-
- // Process the next node(s) on the stack, skipping whole subtrees depending on
- // their length and how many bytes we are advancing.
- CordRep* node = nullptr;
- auto& stack_of_right_children = stack_of_right_children_;
- while (!stack_of_right_children.empty()) {
- node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
- if (node->length > n) break;
- n -= node->length;
- bytes_remaining_ -= node->length;
- node = nullptr;
- }
-
- if (node == nullptr) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- return;
- }
-
- // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
- // right children to the stack for subsequent traversal.
- while (node->IsConcat()) {
- if (node->concat()->left->length > n) {
- // Push right, descend left.
- stack_of_right_children.push_back(node->concat()->right);
- node = node->concat()->left;
- } else {
- // Skip left, descend right.
- n -= node->concat()->left->length;
- bytes_remaining_ -= node->concat()->left->length;
- node = node->concat()->right;
- }
- }
-
- // Get the child node if we encounter a SUBSTRING.
- size_t offset = 0;
- size_t length = node->length;
- if (node->IsSubstring()) {
- offset = node->substring()->start;
- node = node->substring()->child;
- }
-
- assert(node->IsExternal() || node->IsFlat());
- assert(length > n);
- const char* data =
- node->IsExternal() ? node->external()->base : node->flat()->Data();
- current_chunk_ = y_absl::string_view(data + offset + n, length - n);
- current_leaf_ = node;
- bytes_remaining_ -= n;
-}
-
char Cord::operator[](size_t i) const {
Y_ABSL_HARDENING_ASSERT(i < size());
size_t offset = i;
@@ -1718,6 +1108,7 @@ char Cord::operator[](size_t i) const {
if (rep == nullptr) {
return contents_.data()[i];
}
+ rep = cord_internal::SkipCrcNode(rep);
while (true) {
assert(rep != nullptr);
assert(offset < rep->length);
@@ -1729,16 +1120,6 @@ char Cord::operator[](size_t i) const {
} else if (rep->IsExternal()) {
// Get the "i"th character from the external array.
return rep->external()->base[offset];
- } else if (rep->IsConcat()) {
- // Recursively branch to the side of the concatenation that the "i"th
- // character is on.
- size_t left_length = rep->concat()->left->length;
- if (offset < left_length) {
- rep = rep->concat()->left;
- } else {
- offset -= left_length;
- rep = rep->concat()->right;
- }
} else {
// This must be a substring a node, so bypass it to get to the child.
assert(rep->IsSubstring());
@@ -1778,6 +1159,7 @@ y_absl::string_view Cord::FlattenSlowPath() {
/* static */ bool Cord::GetFlatAux(CordRep* rep, y_absl::string_view* fragment) {
assert(rep != nullptr);
+ rep = cord_internal::SkipCrcNode(rep);
if (rep->IsFlat()) {
*fragment = y_absl::string_view(rep->flat()->Data(), rep->length);
return true;
@@ -1807,6 +1189,9 @@ y_absl::string_view Cord::FlattenSlowPath() {
/* static */ void Cord::ForEachChunkAux(
y_absl::cord_internal::CordRep* rep,
y_absl::FunctionRef<void(y_absl::string_view)> callback) {
+ assert(rep != nullptr);
+ rep = cord_internal::SkipCrcNode(rep);
+
if (rep->IsBtree()) {
ChunkIterator it(rep), end;
while (it != end) {
@@ -1816,44 +1201,13 @@ y_absl::string_view Cord::FlattenSlowPath() {
return;
}
- assert(rep != nullptr);
- int stack_pos = 0;
- constexpr int stack_max = 128;
- // Stack of right branches for tree traversal
- y_absl::cord_internal::CordRep* stack[stack_max];
- y_absl::cord_internal::CordRep* current_node = rep;
- while (true) {
- if (current_node->IsConcat()) {
- if (stack_pos == stack_max) {
- // There's no more room on our stack array to add another right branch,
- // and the idea is to avoid allocations, so call this function
- // recursively to navigate this subtree further. (This is not something
- // we expect to happen in practice).
- ForEachChunkAux(current_node, callback);
-
- // Pop the next right branch and iterate.
- current_node = stack[--stack_pos];
- continue;
- } else {
- // Save the right branch for later traversal and continue down the left
- // branch.
- stack[stack_pos++] = current_node->concat()->right;
- current_node = current_node->concat()->left;
- continue;
- }
- }
- // This is a leaf node, so invoke our callback.
- y_absl::string_view chunk;
- bool success = GetFlatAux(current_node, &chunk);
- assert(success);
- if (success) {
- callback(chunk);
- }
- if (stack_pos == 0) {
- // end of traversal
- return;
- }
- current_node = stack[--stack_pos];
+ // This is a leaf node, so invoke our callback.
+ y_absl::cord_internal::CordRep* current_node = cord_internal::SkipCrcNode(rep);
+ y_absl::string_view chunk;
+ bool success = GetFlatAux(current_node, &chunk);
+ assert(success);
+ if (success) {
+ callback(chunk);
}
}
@@ -1868,14 +1222,11 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
*os << " [";
if (include_data) *os << static_cast<void*>(rep);
*os << "]";
- *os << " " << (IsRootBalanced(rep) ? 'b' : 'u');
*os << " " << std::setw(indent) << "";
- if (rep->IsConcat()) {
- *os << "CONCAT depth=" << Depth(rep) << "\n";
+ if (rep->IsCrc()) {
+ *os << "CRC crc=" << rep->crc()->crc << "\n";
indent += kIndentStep;
- indents.push_back(indent);
- stack.push_back(rep->concat()->right);
- rep = rep->concat()->left;
+ rep = rep->crc()->child;
} else if (rep->IsSubstring()) {
*os << "SUBSTRING @ " << rep->substring()->start << "\n";
indent += kIndentStep;
@@ -1912,7 +1263,7 @@ static TString ReportError(CordRep* root, CordRep* node) {
}
static bool VerifyNode(CordRep* root, CordRep* start_node,
- bool full_validation) {
+ bool /* full_validation */) {
y_absl::InlinedVector<CordRep*, 2> worklist;
worklist.push_back(start_node);
do {
@@ -1922,21 +1273,10 @@ static bool VerifyNode(CordRep* root, CordRep* start_node,
Y_ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node));
if (node != root) {
Y_ABSL_INTERNAL_CHECK(node->length != 0, ReportError(root, node));
+ Y_ABSL_INTERNAL_CHECK(!node->IsCrc(), ReportError(root, node));
}
- if (node->IsConcat()) {
- Y_ABSL_INTERNAL_CHECK(node->concat()->left != nullptr,
- ReportError(root, node));
- Y_ABSL_INTERNAL_CHECK(node->concat()->right != nullptr,
- ReportError(root, node));
- Y_ABSL_INTERNAL_CHECK((node->length == node->concat()->left->length +
- node->concat()->right->length),
- ReportError(root, node));
- if (full_validation) {
- worklist.push_back(node->concat()->right);
- worklist.push_back(node->concat()->left);
- }
- } else if (node->IsFlat()) {
+ if (node->IsFlat()) {
Y_ABSL_INTERNAL_CHECK(node->length <= node->flat()->Capacity(),
ReportError(root, node));
} else if (node->IsExternal()) {
@@ -1949,75 +1289,17 @@ static bool VerifyNode(CordRep* root, CordRep* start_node,
Y_ABSL_INTERNAL_CHECK(node->substring()->start + node->length <=
node->substring()->child->length,
ReportError(root, node));
+ } else if (node->IsCrc()) {
+ Y_ABSL_INTERNAL_CHECK(node->crc()->child != nullptr,
+ ReportError(root, node));
+ Y_ABSL_INTERNAL_CHECK(node->crc()->length == node->crc()->child->length,
+ ReportError(root, node));
+ worklist.push_back(node->crc()->child);
}
} while (!worklist.empty());
return true;
}
-// Traverses the tree and computes the total memory allocated.
-/* static */ size_t Cord::MemoryUsageAux(const CordRep* rep) {
- size_t total_mem_usage = 0;
-
- // Allow a quick exit for the common case that the root is a leaf.
- if (RepMemoryUsageLeaf(rep, &total_mem_usage)) {
- return total_mem_usage;
- }
-
- // Iterate over the tree. cur_node is never a leaf node and leaf nodes will
- // never be appended to tree_stack. This reduces overhead from manipulating
- // tree_stack.
- y_absl::InlinedVector<const CordRep*, kInlinedVectorSize> tree_stack;
- const CordRep* cur_node = rep;
- while (true) {
- const CordRep* next_node = nullptr;
-
- if (cur_node->IsConcat()) {
- total_mem_usage += sizeof(CordRepConcat);
- const CordRep* left = cur_node->concat()->left;
- if (!RepMemoryUsageLeaf(left, &total_mem_usage)) {
- next_node = left;
- }
-
- const CordRep* right = cur_node->concat()->right;
- if (!RepMemoryUsageLeaf(right, &total_mem_usage)) {
- if (next_node) {
- tree_stack.push_back(next_node);
- }
- next_node = right;
- }
- } else if (cur_node->IsBtree()) {
- total_mem_usage += sizeof(CordRepBtree);
- const CordRepBtree* node = cur_node->btree();
- if (node->height() == 0) {
- for (const CordRep* edge : node->Edges()) {
- RepMemoryUsageDataEdge(edge, &total_mem_usage);
- }
- } else {
- for (const CordRep* edge : node->Edges()) {
- tree_stack.push_back(edge);
- }
- }
- } else {
- // Since cur_node is not a leaf or a concat node it must be a substring.
- assert(cur_node->IsSubstring());
- total_mem_usage += sizeof(CordRepSubstring);
- next_node = cur_node->substring()->child;
- if (RepMemoryUsageLeaf(next_node, &total_mem_usage)) {
- next_node = nullptr;
- }
- }
-
- if (!next_node) {
- if (tree_stack.empty()) {
- return total_mem_usage;
- }
- next_node = tree_stack.back();
- tree_stack.pop_back();
- }
- cur_node = next_node;
- }
-}
-
std::ostream& operator<<(std::ostream& out, const Cord& cord) {
for (y_absl::string_view chunk : cord.Chunks()) {
out.write(chunk.data(), chunk.size());
@@ -2035,7 +1317,6 @@ uint8_t CordTestAccess::LengthToTag(size_t s) {
Y_ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, y_absl::StrCat("Invalid length ", s));
return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
}
-size_t CordTestAccess::SizeofCordRepConcat() { return sizeof(CordRepConcat); }
size_t CordTestAccess::SizeofCordRepExternal() {
return sizeof(CordRepExternal);
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
index 8f14bb6f8c..a30c7b2a19 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
@@ -70,6 +70,7 @@
#include <util/generic/string.h>
#include <type_traits>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/endian.h"
#include "y_absl/base/internal/per_thread_tls.h"
@@ -78,9 +79,13 @@
#include "y_absl/container/inlined_vector.h"
#include "y_absl/functional/function_ref.h"
#include "y_absl/meta/type_traits.h"
+#include "y_absl/strings/cord_analysis.h"
+#include "y_absl/strings/cord_buffer.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
#include "y_absl/strings/internal/cord_rep_btree_reader.h"
+#include "y_absl/strings/internal/cord_rep_crc.h"
#include "y_absl/strings/internal/cord_rep_ring.h"
#include "y_absl/strings/internal/cordz_functions.h"
#include "y_absl/strings/internal/cordz_info.h"
@@ -100,6 +105,20 @@ template <typename Releaser>
Cord MakeCordFromExternal(y_absl::string_view, Releaser&&);
void CopyCordToString(const Cord& src, TString* dst);
+// Cord memory accounting modes
+enum class CordMemoryAccounting {
+ // Counts the *approximate* number of bytes held in full or in part by this
+ // Cord (which may not remain the same between invocations). Cords that share
+ // memory could each be "charged" independently for the same shared memory.
+ kTotal,
+
+ // Counts the *approximate* number of bytes held in full or in part by this
+ // Cord weighted by the sharing ratio of that data. For example, if some data
+ // edge is shared by 4 different Cords, then each cord is attributed 1/4th of
+ // the total memory usage as a 'fair share' of the total memory usage.
+ kFairShare,
+};
+
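Editor's illustration (a sketch, not part of the patch) of how the two accounting modes differ when cords share a rep; the counts are approximate by design.
//   y_absl::Cord a(TString(10000, 'x'));    // heap-allocated rep
//   y_absl::Cord b = a;                     // `b` shares `a`'s rep
//   size_t total = a.EstimatedMemoryUsage();   // kTotal: `a` is charged for all shared bytes
//   size_t fair = a.EstimatedMemoryUsage(y_absl::CordMemoryAccounting::kFairShare);
//   // `fair` charges roughly half of the shared rep to `a`, since `a` and `b` share it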
// Cord
//
// A Cord is a sequence of characters, designed to be more efficient than a
@@ -214,7 +233,7 @@ class Cord {
//
// Releases the Cord data. Any nodes that share data with other Cords, if
// applicable, will have their reference counts reduced by 1.
- void Clear();
+ Y_ABSL_ATTRIBUTE_REINITIALIZES void Clear();
// Cord::Append()
//
@@ -226,6 +245,45 @@ class Cord {
template <typename T, EnableIfString<T> = 0>
void Append(T&& src);
+ // Appends `buffer` to this cord, unless `buffer` has a zero length in which
+ // case this method has no effect on this cord instance.
+ // This method is guaranteed to consume `buffer`.
+ void Append(CordBuffer buffer);
+
+ // Returns a CordBuffer, re-using potential existing capacity in this cord.
+ //
+ // Cord instances may have additional unused capacity in the last (or first)
+ // nodes of the underlying tree to facilitate amortized growth. This method
+ // allows applications to explicitly use this spare capacity if available,
+ // or create a new CordBuffer instance otherwise.
+ // If this cord has a final non-shared node with at least `min_capacity`
+ // available, then this method will return that buffer including its data
+ // contents. I.e., the returned buffer will have a non-zero length, and
+ // a capacity of at least `buffer.length + min_capacity`. Otherwise, this
+ // method will return `CordBuffer::CreateWithDefaultLimit(capacity)`.
+ //
+ // Below is an example of using GetAppendBuffer. Notice that in this example
+ // we use `GetAppendBuffer()` only on the first iteration: `cord` may hold
+ // initial spare capacity that we know nothing about, and this is our one
+ // chance to use it. The buffers we append after that are fully utilized, so
+ // we avoid calling `GetAppendBuffer()` on subsequent iterations: doing so
+ // would work fine, but would needlessly inspect the cord contents:
+ //
+ // void AppendRandomDataToCord(y_absl::Cord &cord, size_t n) {
+ // bool first = true;
+ // while (n > 0) {
+ // CordBuffer buffer = first ? cord.GetAppendBuffer(n)
+ // : CordBuffer::CreateWithDefaultLimit(n);
+ // y_absl::Span<char> data = buffer.available_up_to(n);
+ // FillRandomValues(data.data(), data.size());
+ // buffer.IncreaseLengthBy(data.size());
+ // cord.Append(std::move(buffer));
+ // n -= data.size();
+ // first = false;
+ // }
+ // }
+ CordBuffer GetAppendBuffer(size_t capacity, size_t min_capacity = 16);
+
// Cord::Prepend()
//
// Prepends data to the Cord, which may come from another Cord or other string
@@ -235,6 +293,11 @@ class Cord {
template <typename T, EnableIfString<T> = 0>
void Prepend(T&& src);
+ // Prepends `buffer` to this cord, unless `buffer` has a zero length in which
+ // case this method has no effect on this cord instance.
+ // This method is guaranteed to consume `buffer`.
+ void Prepend(CordBuffer buffer);
+
// Cord::RemovePrefix()
//
// Removes the first `n` bytes of a Cord.
@@ -270,11 +333,10 @@ class Cord {
// Cord::EstimatedMemoryUsage()
//
- // Returns the *approximate* number of bytes held in full or in part by this
- // Cord (which may not remain the same between invocations). Note that Cords
- // that share memory could each be "charged" independently for the same shared
- // memory.
- size_t EstimatedMemoryUsage() const;
+ // Returns the *approximate* number of bytes held by this cord.
+ // See CordMemoryAccounting for more information on the accounting method.
+ size_t EstimatedMemoryUsage(CordMemoryAccounting accounting_method =
+ CordMemoryAccounting::kTotal) const;
// Cord::Compare()
//
@@ -324,7 +386,7 @@ class Cord {
//----------------------------------------------------------------------------
//
// A `Cord::ChunkIterator` allows iteration over the constituent chunks of its
- // Cord. Such iteration allows you to perform non-const operatons on the data
+ // Cord. Such iteration allows you to perform non-const operations on the data
// of a Cord without modifying it.
//
// Generally, you do not instantiate a `Cord::ChunkIterator` directly;
@@ -372,12 +434,6 @@ class Cord {
using CordRepBtree = y_absl::cord_internal::CordRepBtree;
using CordRepBtreeReader = y_absl::cord_internal::CordRepBtreeReader;
- // Stack of right children of concat nodes that we have to visit.
- // Keep this at the end of the structure to avoid cache-thrashing.
- // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
- // the inlined vector size (47 exists for backward compatibility).
- using Stack = y_absl::InlinedVector<y_absl::cord_internal::CordRep*, 47>;
-
// Constructs a `begin()` iterator from `tree`. `tree` must not be null.
explicit ChunkIterator(cord_internal::CordRep* tree);
@@ -393,17 +449,10 @@ class Cord {
Cord AdvanceAndReadBytes(size_t n);
void AdvanceBytes(size_t n);
- // Stack specific operator++
- ChunkIterator& AdvanceStack();
-
// Btree specific operator++
ChunkIterator& AdvanceBtree();
void AdvanceBytesBtree(size_t n);
- // Iterates `n` bytes, where `n` is expected to be greater than or equal to
- // `current_chunk_.size()`.
- void AdvanceBytesSlowPath(size_t n);
-
// A view into bytes of the current `CordRep`. It may only be a view to a
// suffix of bytes if this is being used by `CharIterator`.
y_absl::string_view current_chunk_;
@@ -416,12 +465,9 @@ class Cord {
// Cord reader for cord btrees. Empty if not traversing a btree.
CordRepBtreeReader btree_reader_;
-
- // See 'Stack' alias definition.
- Stack stack_of_right_children_;
};
- // Cord::ChunkIterator::chunk_begin()
+ // Cord::chunk_begin()
//
// Returns an iterator to the first chunk of the `Cord`.
//
@@ -437,7 +483,7 @@ class Cord {
// }
ChunkIterator chunk_begin() const;
- // Cord::ChunkItertator::chunk_end()
+ // Cord::chunk_end()
//
// Returns an iterator one increment past the last chunk of the `Cord`.
//
@@ -447,7 +493,7 @@ class Cord {
ChunkIterator chunk_end() const;
//----------------------------------------------------------------------------
- // Cord::ChunkIterator::ChunkRange
+ // Cord::ChunkRange
//----------------------------------------------------------------------------
//
// `ChunkRange` is a helper class for iterating over the chunks of the `Cord`,
@@ -461,7 +507,7 @@ class Cord {
class ChunkRange {
public:
// Fulfill minimum c++ container requirements [container.requirements]
- // Theses (partial) container type definitions allow ChunkRange to be used
+ // These (partial) container type definitions allow ChunkRange to be used
// in various utilities expecting a subset of [container.requirements].
// For example, the below enables using `::testing::ElementsAre(...)`
using value_type = y_absl::string_view;
@@ -481,9 +527,9 @@ class Cord {
// Cord::Chunks()
//
- // Returns a `Cord::ChunkIterator::ChunkRange` for iterating over the chunks
- // of a `Cord` with a range-based for-loop. For most iteration tasks on a
- // Cord, use `Cord::Chunks()` to retrieve this iterator.
+ // Returns a `Cord::ChunkRange` for iterating over the chunks of a `Cord` with
+ // a range-based for-loop. For most iteration tasks on a Cord, use
+ // `Cord::Chunks()` to retrieve this iterator.
//
// Example:
//
@@ -549,7 +595,7 @@ class Cord {
ChunkIterator chunk_iterator_;
};
- // Cord::CharIterator::AdvanceAndRead()
+ // Cord::AdvanceAndRead()
//
// Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes
// advanced as a separate `Cord`. `n_bytes` must be less than or equal to the
@@ -557,21 +603,21 @@ class Cord {
// valid to pass `char_end()` and `0`.
static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes);
- // Cord::CharIterator::Advance()
+ // Cord::Advance()
//
// Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than
// or equal to the number of bytes remaining within the Cord; otherwise,
// behavior is undefined. It is valid to pass `char_end()` and `0`.
static void Advance(CharIterator* it, size_t n_bytes);
- // Cord::CharIterator::ChunkRemaining()
+ // Cord::ChunkRemaining()
//
// Returns the longest contiguous view starting at the iterator's position.
//
// `it` must be dereferenceable.
static y_absl::string_view ChunkRemaining(const CharIterator& it);
- // Cord::CharIterator::char_begin()
+ // Cord::char_begin()
//
// Returns an iterator to the first character of the `Cord`.
//
@@ -580,7 +626,7 @@ class Cord {
// a `CharIterator` where range-based for-loops may not be available.
CharIterator char_begin() const;
- // Cord::CharIterator::char_end()
+ // Cord::char_end()
//
// Returns an iterator to one past the last character of the `Cord`.
//
@@ -589,13 +635,13 @@ class Cord {
// a `CharIterator` where range-based for-loops are not useful.
CharIterator char_end() const;
- // Cord::CharIterator::CharRange
+ // Cord::CharRange
//
// `CharRange` is a helper class for iterating over the characters of a
// producing an iterator which can be used within a range-based for loop.
// Construction of a `CharRange` will return an iterator pointing to the first
// character of the Cord. Generally, do not construct a `CharRange` directly;
- // instead, prefer to use the `Cord::Chars()` method show below.
+ // instead, prefer to use the `Cord::Chars()` method shown below.
//
// Implementation note: `CharRange` is simply a convenience wrapper over
// `Cord::char_begin()` and `Cord::char_end()`.
@@ -620,11 +666,11 @@ class Cord {
const Cord* cord_;
};
- // Cord::CharIterator::Chars()
+ // Cord::Chars()
//
- // Returns a `Cord::CharIterator` for iterating over the characters of a
- // `Cord` with a range-based for-loop. For most character-based iteration
- // tasks on a Cord, use `Cord::Chars()` to retrieve this iterator.
+ // Returns a `Cord::CharRange` for iterating over the characters of a `Cord`
+ // with a range-based for-loop. For most character-based iteration tasks on a
+ // Cord, use `Cord::Chars()` to retrieve this iterator.
//
// Example:
//
@@ -671,6 +717,29 @@ class Cord {
cord->Append(part);
}
+ // Cord::SetExpectedChecksum()
+ //
+ // Stores a checksum value with this non-empty cord instance, for later
+ // retrieval.
+ //
+ // The expected checksum is a number stored out-of-band, alongside the data.
+ // It is preserved across copies and assignments, but any mutations to a cord
+ // will cause it to lose its expected checksum.
+ //
+ // The expected checksum is not part of a Cord's value, and does not affect
+ // operations such as equality or hashing.
+ //
+ // This field is intended to store a CRC32C checksum for later validation, to
+ // help support end-to-end checksum workflows. However, the Cord API itself
+ // does no CRC validation, and assigns no meaning to this number.
+ //
+ // This call has no effect if this cord is empty.
+ void SetExpectedChecksum(uint32_t crc);
+
+ // Returns this cord's expected checksum, if it has one. Otherwise, returns
+ // nullopt.
+ y_absl::optional<uint32_t> ExpectedChecksum() const;
+
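Editor's sketch of the contract described above; the CRC value is a hypothetical placeholder.
//   y_absl::Cord cord("payload");
//   cord.SetExpectedChecksum(0x12345678);           // stored out-of-band with the data
//   assert(cord.ExpectedChecksum() == 0x12345678);  // preserved across copies and assignments
//   cord.Append("more");                            // any mutation drops the expected checksum
//   assert(!cord.ExpectedChecksum().has_value());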
template <typename H>
friend H AbslHashValue(H hash_state, const y_absl::Cord& c) {
y_absl::optional<y_absl::string_view> maybe_flat = c.TryFlat();
@@ -686,7 +755,8 @@ class Cord {
// be used by spelling y_absl::strings_internal::MakeStringConstant, which is
// also an internal API.
template <typename T>
- explicit constexpr Cord(strings_internal::StringConstant<T>);
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ constexpr Cord(strings_internal::StringConstant<T>);
private:
using CordRep = y_absl::cord_internal::CordRep;
@@ -738,12 +808,12 @@ class Cord {
bool empty() const;
size_t size() const;
const char* data() const; // Returns nullptr if holding pointer
- void set_data(const char* data, size_t n,
- bool nullify_tail); // Discards pointer, if any
- char* set_data(size_t n); // Write data to the result
+ void set_data(const char* data, size_t n); // Discards pointer, if any
+ char* set_data(size_t n); // Write data to the result
// Returns nullptr if holding bytes
y_absl::cord_internal::CordRep* tree() const;
y_absl::cord_internal::CordRep* as_tree() const;
+ const char* as_chars() const;
// Returns non-null iff was holding a pointer
y_absl::cord_internal::CordRep* clear();
// Converts to pointer if necessary.
@@ -831,6 +901,11 @@ class Cord {
// Returns true if the Cord is being profiled by cordz.
bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); }
+ // Returns the available inlined capacity, or 0 if is_tree() == true.
+ size_t remaining_inline_capacity() const {
+ return data_.is_tree() ? 0 : kMaxInline - data_.inline_size();
+ }
+
// Returns the profiled CordzInfo, or nullptr if not sampled.
y_absl::cord_internal::CordzInfo* cordz_info() const {
return data_.cordz_info();
@@ -861,9 +936,6 @@ class Cord {
};
InlineRep contents_;
- // Helper for MemoryUsage().
- static size_t MemoryUsageAux(const y_absl::cord_internal::CordRep* rep);
-
// Helper for GetFlat() and TryFlat().
static bool GetFlatAux(y_absl::cord_internal::CordRep* rep,
y_absl::string_view* fragment);
@@ -901,6 +973,15 @@ class Cord {
template <typename C>
void AppendImpl(C&& src);
+ // Appends / Prepends `src` to this instance, using precise sizing.
+ // This method explicitly does not attempt to use any spare capacity
+ // in any pending, last-added, privately owned flat.
+ // Requires `src` to be <= kMaxFlatLength.
+ void AppendPrecise(y_absl::string_view src, MethodIdentifier method);
+ void PrependPrecise(y_absl::string_view src, MethodIdentifier method);
+
+ CordBuffer GetAppendBufferSlowPath(size_t capacity, size_t min_capacity);
+
// Prepends the provided data to this instance. `method` contains the public
// API method for this action which is tracked for Cordz sampling purposes.
void PrependArray(y_absl::string_view src, MethodIdentifier method);
@@ -938,8 +1019,8 @@ namespace cord_internal {
// Fast implementation of memmove for up to 15 bytes. This implementation is
// safe for overlapping regions. If nullify_tail is true, the destination is
// padded with '\0' up to 16 bytes.
-inline void SmallMemmove(char* dst, const char* src, size_t n,
- bool nullify_tail = false) {
+template <bool nullify_tail = false>
+inline void SmallMemmove(char* dst, const char* src, size_t n) {
if (n >= 8) {
assert(n <= 16);
uint64_t buf1;
@@ -976,22 +1057,16 @@ inline void SmallMemmove(char* dst, const char* src, size_t n,
}
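Editor's note: with `nullify_tail` now a template parameter, the variant is selected at compile time; an illustrative before/after of a call site (the buffers are hypothetical).
//   char dst[16];
//   const char* src = "hello";
//   cord_internal::SmallMemmove(dst, src, 5);        // was: SmallMemmove(dst, src, 5)
//   cord_internal::SmallMemmove<true>(dst, src, 5);  // was: SmallMemmove(dst, src, 5, true)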
// Does non-template-specific `CordRepExternal` initialization.
-// Expects `data` to be non-empty.
+// Requires `data` to be non-empty.
void InitializeCordRepExternal(y_absl::string_view data, CordRepExternal* rep);
// Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
-// to it, or `nullptr` if `data` was empty.
+// to it. Requires `data` to be non-empty.
template <typename Releaser>
// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
CordRep* NewExternalRep(y_absl::string_view data, Releaser&& releaser) {
+ assert(!data.empty());
using ReleaserType = y_absl::decay_t<Releaser>;
- if (data.empty()) {
- // Never create empty external nodes.
- InvokeReleaser(Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
- data);
- return nullptr;
- }
-
CordRepExternal* rep = new CordRepExternalImpl<ReleaserType>(
std::forward<Releaser>(releaser), 0);
InitializeCordRepExternal(data, rep);
@@ -1011,10 +1086,15 @@ inline CordRep* NewExternalRep(y_absl::string_view data,
template <typename Releaser>
Cord MakeCordFromExternal(y_absl::string_view data, Releaser&& releaser) {
Cord cord;
- if (auto* rep = ::y_absl::cord_internal::NewExternalRep(
- data, std::forward<Releaser>(releaser))) {
- cord.contents_.EmplaceTree(rep,
+ if (Y_ABSL_PREDICT_TRUE(!data.empty())) {
+ cord.contents_.EmplaceTree(::y_absl::cord_internal::NewExternalRep(
+ data, std::forward<Releaser>(releaser)),
Cord::MethodIdentifier::kMakeCordFromExternal);
+ } else {
+ using ReleaserType = y_absl::decay_t<Releaser>;
+ cord_internal::InvokeReleaser(
+ cord_internal::Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
+ data);
}
return cord;
}
@@ -1069,6 +1149,11 @@ inline const char* Cord::InlineRep::data() const {
return is_tree() ? nullptr : data_.as_chars();
}
+inline const char* Cord::InlineRep::as_chars() const {
+ assert(!data_.is_tree());
+ return data_.as_chars();
+}
+
inline y_absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const {
assert(data_.is_tree());
return data_.as_tree();
@@ -1207,10 +1292,15 @@ inline size_t Cord::size() const {
inline bool Cord::empty() const { return contents_.empty(); }
-inline size_t Cord::EstimatedMemoryUsage() const {
+inline size_t Cord::EstimatedMemoryUsage(
+ CordMemoryAccounting accounting_method) const {
size_t result = sizeof(Cord);
if (const y_absl::cord_internal::CordRep* rep = contents_.tree()) {
- result += MemoryUsageAux(rep);
+ if (accounting_method == CordMemoryAccounting::kFairShare) {
+ result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
+ } else {
+ result += cord_internal::GetEstimatedMemoryUsage(rep);
+ }
}
return result;
}
@@ -1248,6 +1338,31 @@ inline void Cord::Prepend(y_absl::string_view src) {
PrependArray(src, CordzUpdateTracker::kPrependString);
}
+inline void Cord::Append(CordBuffer buffer) {
+ if (Y_ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+ y_absl::string_view short_value;
+ if (CordRep* rep = buffer.ConsumeValue(short_value)) {
+ contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer);
+ } else {
+ AppendPrecise(short_value, CordzUpdateTracker::kAppendCordBuffer);
+ }
+}
+
+inline void Cord::Prepend(CordBuffer buffer) {
+ if (Y_ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+ y_absl::string_view short_value;
+ if (CordRep* rep = buffer.ConsumeValue(short_value)) {
+ contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer);
+ } else {
+ PrependPrecise(short_value, CordzUpdateTracker::kPrependCordBuffer);
+ }
+}
+
+inline CordBuffer Cord::GetAppendBuffer(size_t capacity, size_t min_capacity) {
+ if (empty()) return CordBuffer::CreateWithDefaultLimit(capacity);
+ return GetAppendBufferSlowPath(capacity, min_capacity);
+}
+
extern template void Cord::Append(TString&& src);
extern template void Cord::Prepend(TString&& src);
@@ -1274,27 +1389,27 @@ inline bool Cord::StartsWith(y_absl::string_view rhs) const {
}
inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
+ tree = cord_internal::SkipCrcNode(tree);
if (tree->tag == cord_internal::BTREE) {
current_chunk_ = btree_reader_.Init(tree->btree());
- return;
+ } else {
+ current_leaf_ = tree;
+ current_chunk_ = cord_internal::EdgeData(tree);
}
-
- stack_of_right_children_.push_back(tree);
- operator++();
}
-inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree)
- : bytes_remaining_(tree->length) {
+inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree) {
+ bytes_remaining_ = tree->length;
InitTree(tree);
}
-inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
- : bytes_remaining_(cord->size()) {
- if (cord->contents_.is_tree()) {
- InitTree(cord->contents_.as_tree());
+inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) {
+ if (CordRep* tree = cord->contents_.tree()) {
+ bytes_remaining_ = tree->length;
+ InitTree(tree);
} else {
- current_chunk_ =
- y_absl::string_view(cord->contents_.data(), bytes_remaining_);
+ bytes_remaining_ = cord->contents_.inline_size();
+ current_chunk_ = {cord->contents_.data(), bytes_remaining_};
}
}
@@ -1324,8 +1439,11 @@ inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
assert(bytes_remaining_ >= current_chunk_.size());
bytes_remaining_ -= current_chunk_.size();
if (bytes_remaining_ > 0) {
- return btree_reader_ ? AdvanceBtree() : AdvanceStack();
- } else {
+ if (btree_reader_) {
+ return AdvanceBtree();
+ } else {
+ assert(!current_chunk_.empty()); // Called on invalid iterator.
+ }
current_chunk_ = {};
}
return *this;
@@ -1366,7 +1484,11 @@ inline void Cord::ChunkIterator::AdvanceBytes(size_t n) {
if (Y_ABSL_PREDICT_TRUE(n < current_chunk_.size())) {
RemoveChunkPrefix(n);
} else if (n != 0) {
- btree_reader_ ? AdvanceBytesBtree(n) : AdvanceBytesSlowPath(n);
+ if (btree_reader_) {
+ AdvanceBytesBtree(n);
+ } else {
+ bytes_remaining_ = 0;
+ }
}
}
@@ -1457,7 +1579,7 @@ inline void Cord::ForEachChunk(
}
}
-// Nonmember Cord-to-Cord relational operarators.
+// Nonmember Cord-to-Cord relational operators.
inline bool operator==(const Cord& lhs, const Cord& rhs) {
if (lhs.contents_.IsSame(rhs.contents_)) return true;
size_t rhs_size = rhs.size();
@@ -1508,7 +1630,6 @@ class CordTestAccess {
public:
static size_t FlatOverhead();
static size_t MaxFlatLength();
- static size_t SizeofCordRepConcat();
static size_t SizeofCordRepExternal();
static size_t SizeofCordRepSubstring();
static size_t FlatTagToLength(uint8_t tag);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc
new file mode 100644
index 0000000000..9c925510cc
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc
@@ -0,0 +1,188 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/cord_analysis.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/config.h"
+#include "y_absl/container/inlined_vector.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_crc.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/internal/cord_rep_ring.h"
+//
+#include "y_absl/base/macros.h"
+#include "y_absl/base/port.h"
+#include "y_absl/functional/function_ref.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+// Accounting mode for analyzing memory usage.
+enum class Mode { kTotal, kFairShare };
+
+// CordRepRef holds a `const CordRep*` reference in rep, and depending on mode,
+// holds a 'fraction' representing a cumulative inverse refcount weight.
+template <Mode mode>
+struct CordRepRef {
+ // Instantiates a CordRepRef instance.
+ explicit CordRepRef(const CordRep* r) : rep(r) {}
+
+ // Creates a child reference holding the provided child.
+ // Overloaded to add cumulative reference count for kFairShare.
+ CordRepRef Child(const CordRep* child) const { return CordRepRef(child); }
+
+ const CordRep* rep;
+};
+
+// RawUsage holds the computed total number of bytes.
+template <Mode mode>
+struct RawUsage {
+ size_t total = 0;
+
+ // Add 'size' to total, ignoring the CordRepRef argument.
+ void Add(size_t size, CordRepRef<mode>) { total += size; }
+};
+
+// Returns `d / refcount`, avoiding a division for the common case refcount == 1.
+template <typename refcount_t>
+double MaybeDiv(double d, refcount_t refcount) {
+ return refcount == 1 ? d : d / refcount;
+}
+
+// Overloaded 'kFairShare' specialization for CordRepRef. This class holds a
+// `fraction` value which represents a cumulative inverse refcount weight.
+// For example, a top node with a reference count of 2 will have a fraction
+// value of 1/2 = 0.5, representing the 'fair share' of memory it references.
+// A node below such a node with a reference count of 5 then has a fraction of
+// 0.5 / 5 = 0.1 representing the fair share of memory below that node, etc.
+template <>
+struct CordRepRef<Mode::kFairShare> {
+ // Creates a CordRepRef with the provided rep and top (parent) fraction.
+ explicit CordRepRef(const CordRep* r, double frac = 1.0)
+ : rep(r), fraction(MaybeDiv(frac, r->refcount.Get())) {}
+
+ // Returns a CordRepRef with a fraction of `this->fraction / child.refcount`
+ CordRepRef Child(const CordRep* child) const {
+ return CordRepRef(child, fraction);
+ }
+
+ const CordRep* rep;
+ double fraction;
+};
+
+// Overloaded 'kFairShare' specialization for RawUsage
+template <>
+struct RawUsage<Mode::kFairShare> {
+ double total = 0;
+
+ // Adds `size` multiplied by `rep.fraction` to the total size.
+ void Add(size_t size, CordRepRef<Mode::kFairShare> rep) {
+ total += static_cast<double>(size) * rep.fraction;
+ }
+};
+
+// Computes the estimated memory size of the provided data edge.
+// External reps are assumed 'heap allocated at their exact size'.
+template <Mode mode>
+void AnalyzeDataEdge(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+ assert(IsDataEdge(rep.rep));
+
+ // Consume all substrings
+ if (rep.rep->tag == SUBSTRING) {
+ raw_usage.Add(sizeof(CordRepSubstring), rep);
+ rep = rep.Child(rep.rep->substring()->child);
+ }
+
+ // Consume FLAT / EXTERNAL
+ const size_t size =
+ rep.rep->tag >= FLAT
+ ? rep.rep->flat()->AllocatedSize()
+ : rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
+ raw_usage.Add(size, rep);
+}
+
+// Computes the memory size of the provided Ring tree.
+template <Mode mode>
+void AnalyzeRing(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+ const CordRepRing* ring = rep.rep->ring();
+ raw_usage.Add(CordRepRing::AllocSize(ring->capacity()), rep);
+ ring->ForEach([&](CordRepRing::index_type pos) {
+ AnalyzeDataEdge(rep.Child(ring->entry_child(pos)), raw_usage);
+ });
+}
+
+// Computes the memory size of the provided Btree tree.
+template <Mode mode>
+void AnalyzeBtree(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+ raw_usage.Add(sizeof(CordRepBtree), rep);
+ const CordRepBtree* tree = rep.rep->btree();
+ if (tree->height() > 0) {
+ for (CordRep* edge : tree->Edges()) {
+ AnalyzeBtree(rep.Child(edge), raw_usage);
+ }
+ } else {
+ for (CordRep* edge : tree->Edges()) {
+ AnalyzeDataEdge(rep.Child(edge), raw_usage);
+ }
+ }
+}
+
+template <Mode mode>
+size_t GetEstimatedUsage(const CordRep* rep) {
+ // Zero initialized memory usage totals.
+ RawUsage<mode> raw_usage;
+
+ // Capture top level node and refcount into a CordRepRef.
+ CordRepRef<mode> repref(rep);
+
+ // Consume the top level CRC node if present.
+ if (repref.rep->tag == CRC) {
+ raw_usage.Add(sizeof(CordRepCrc), repref);
+ repref = repref.Child(repref.rep->crc()->child);
+ }
+
+ if (IsDataEdge(repref.rep)) {
+ AnalyzeDataEdge(repref, raw_usage);
+ } else if (repref.rep->tag == BTREE) {
+ AnalyzeBtree(repref, raw_usage);
+ } else if (repref.rep->tag == RING) {
+ AnalyzeRing(repref, raw_usage);
+ } else {
+ assert(false);
+ }
+
+ return static_cast<size_t>(raw_usage.total);
+}
+
+} // namespace
+
+size_t GetEstimatedMemoryUsage(const CordRep* rep) {
+ return GetEstimatedUsage<Mode::kTotal>(rep);
+}
+
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep) {
+ return GetEstimatedUsage<Mode::kFairShare>(rep);
+}
+
+} // namespace cord_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
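
To illustrate the fraction propagation above, a worked example (the byte counts are illustrative; actual values depend on allocator rounding):

    // Suppose a btree leaf B is referenced by two Cords (refcount 2) and holds
    // a single flat F with an allocated size of 4096 bytes (refcount 1):
    //   GetEstimatedMemoryUsage(B)          == sizeof(CordRepBtree) + 4096
    //   GetEstimatedFairShareMemoryUsage(B) ~= (sizeof(CordRepBtree) + 4096) / 2
    // CordRepRef<kFairShare> starts with fraction 1/2 at B; F inherits that
    // fraction unchanged because MaybeDiv(0.5, /*refcount=*/1) == 0.5, so every
    // byte below B is charged at half weight to each sharing Cord.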
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h
new file mode 100644
index 0000000000..75f9216ae3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef Y_ABSL_STRINGS_CORD_ANALYSIS_H_
+#define Y_ABSL_STRINGS_CORD_ANALYSIS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_internal.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// CordRep (which may not remain the same between invocations). Cords that share
+// memory could each be "charged" independently for the same shared memory.
+size_t GetEstimatedMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// CordRep weighted by the sharing ratio of that data. For example, if some data
+// edge is shared by 4 different Cords, then each cord is attributed 1/4th of
+// the total memory usage as a 'fair share' of the total memory usage.
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep);
+
+} // namespace cord_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+
+#endif // Y_ABSL_STRINGS_CORD_ANALYSIS_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.cc
index 924d6e3d54..f7fc02fe39 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check_disable.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2022 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// https://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,9 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Disable LeakSanitizer when this file is linked in.
-// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
-extern "C" int __lsan_is_turned_off();
-extern "C" int __lsan_is_turned_off() {
- return 1;
-}
+#include "y_absl/strings/cord_buffer.h"
+
+#include <cstddef>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordBuffer::kDefaultLimit;
+constexpr size_t CordBuffer::kCustomLimit;
+#endif
+
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h
new file mode 100644
index 0000000000..cbe4287b07
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h
@@ -0,0 +1,572 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cord_buffer.h
+// -----------------------------------------------------------------------------
+//
+// This file defines an `y_absl::CordBuffer` data structure to hold data for
+// eventual inclusion within an existing `Cord` data structure. Cord buffers are
+// useful for building large Cords that may require custom allocation of its
+// associated memory.
+//
+#ifndef Y_ABSL_STRINGS_CORD_BUFFER_H_
+#define Y_ABSL_STRINGS_CORD_BUFFER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/macros.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/types/span.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+
+class Cord;
+class CordBufferTestPeer;
+
+// CordBuffer
+//
+// CordBuffer manages memory buffers for purposes such as zero-copy APIs as well
+// as applications building cords with large data requiring granular control
+// over the allocation and size of cord data. For example, a function creating
+// a cord of random data could use a CordBuffer as follows:
+//
+// y_absl::Cord CreateRandomCord(size_t length) {
+// y_absl::Cord cord;
+// while (length > 0) {
+// CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(length);
+// y_absl::Span<char> data = buffer.available_up_to(length);
+// FillRandomValues(data.data(), data.size());
+// buffer.IncreaseLengthBy(data.size());
+// cord.Append(std::move(buffer));
+// length -= data.size();
+// }
+// return cord;
+// }
+//
+// CordBuffer instances are by default limited to a capacity of `kDefaultLimit`
+// bytes. `kDefaultLimit` is currently just under 4KiB, but this default may
+// change in the future and/or for specific architectures. The default limit is
+// aimed to provide a good trade-off between performance and memory overhead.
+// Smaller buffers typically incur more compute cost while larger buffers are
+// more CPU efficient, but can create significant memory overhead because such
+// allocations are less granular. Using larger buffers may also increase the
+// risk of memory fragmentation.
+//
+// Applications create a buffer using one of the `CreateWithDefaultLimit()` or
+// `CreateWithCustomLimit()` methods. The returned instance will have a non-zero
+// capacity and a zero length. Applications use the `data()` method to set the
+// contents of the managed memory, and once done filling the buffer, use the
+// `IncreaseLengthBy()` or 'SetLength()' method to specify the length of the
+// initialized data before adding the buffer to a Cord.
+//
+// The `CreateWithCustomLimit()` method is intended for applications needing
+// larger buffers than the default memory limit, allowing the allocation of up
+// to a capacity of `kCustomLimit` bytes minus some minimum internal overhead.
+// The usage of `CreateWithCustomLimit()` should be limited to only those use
+// cases where the distribution of the input is relatively well known, and/or
+// where the efficiency gains outweigh the risk of memory
+// fragmentation. See the documentation for `CreateWithCustomLimit()` for more
+// information on using larger custom limits.
+//
+// The capacity of a `CordBuffer` returned by one of the `Create` methods may
+// be larger than the requested capacity due to rounding, alignment and
+// granularity of the memory allocator. Applications should use the `capacity`
+// method to obtain the effective capacity of the returned instance as
+// demonstrated in the provided example above.
+//
+// CordBuffer is a move-only class. All references into the managed memory are
+// invalidated when an instance is moved into either another CordBuffer instance
+// or a Cord. Writing to a location obtained by a previous call to `data()`
+// after an instance was moved will lead to undefined behavior.
+//
+// A `moved from` CordBuffer instance will have a valid, but empty state.
+// CordBuffer is thread compatible.
+class CordBuffer {
+ public:
+ // kDefaultLimit
+ //
+ // Default capacity limits of allocated CordBuffers.
+ // See the class comments for more information on allocation limits.
+ static constexpr size_t kDefaultLimit = cord_internal::kMaxFlatLength;
+
+ // kCustomLimit
+ //
+ // Maximum size for CreateWithCustomLimit() allocated buffers.
+  // Note that the effective capacity may be slightly smaller because of the
+  // internal overhead of the underlying cord buffers.
+ static constexpr size_t kCustomLimit = 64U << 10;
+
+ // Constructors, Destructors and Assignment Operators
+
+ // Creates an empty CordBuffer.
+ CordBuffer() = default;
+
+ // Destroys this CordBuffer instance and, if not empty, releases any memory
+ // managed by this instance, invalidating previously returned references.
+ ~CordBuffer();
+
+ // CordBuffer is move-only
+ CordBuffer(CordBuffer&& rhs) noexcept;
+ CordBuffer& operator=(CordBuffer&&) noexcept;
+ CordBuffer(const CordBuffer&) = delete;
+ CordBuffer& operator=(const CordBuffer&) = delete;
+
+ // CordBuffer::MaximumPayload()
+ //
+ // Returns the guaranteed maximum payload for a CordBuffer returned by the
+  // `CreateWithDefaultLimit()` method. Each internal buffer inside a Cord
+  // incurs a small overhead to manage the length, type and reference count for
+  // the buffer managed inside the cord tree. Applications can use this method
+  // to get the approximate number of buffers required for a given byte
+ // size, etc.
+ //
+ // For example:
+ // const size_t payload = y_absl::CordBuffer::MaximumPayload();
+ // const size_t buffer_count = (total_size + payload - 1) / payload;
+ // buffers.reserve(buffer_count);
+ static constexpr size_t MaximumPayload();
+
+  // Overload of the above `MaximumPayload()`, except that it returns the
+ // maximum payload for a CordBuffer returned by the `CreateWithCustomLimit()`
+ // method given the provided `block_size`.
+ static constexpr size_t MaximumPayload(size_t block_size);
+
+ // CordBuffer::CreateWithDefaultLimit()
+ //
+ // Creates a CordBuffer instance of the desired `capacity`, capped at the
+ // default limit `kDefaultLimit`. The returned buffer has a guaranteed
+ // capacity of at least `min(kDefaultLimit, capacity)`. See the class comments
+ // for more information on buffer capacities and intended usage.
+ static CordBuffer CreateWithDefaultLimit(size_t capacity);
+
+
+ // CordBuffer::CreateWithCustomLimit()
+ //
+ // Creates a CordBuffer instance of the desired `capacity` rounded to an
+ // appropriate power of 2 size less than, or equal to `block_size`.
+ // Requires `block_size` to be a power of 2.
+ //
+ // If `capacity` is less than or equal to `kDefaultLimit`, then this method
+  // behaves identically to `CreateWithDefaultLimit`, which means that the caller
+ // is guaranteed to get a buffer of at least the requested capacity.
+ //
+ // If `capacity` is greater than or equal to `block_size`, then this method
+ // returns a buffer with an `allocated size` of `block_size` bytes. Otherwise,
+  // this method returns a buffer with a suitably smaller power of 2 block size
+ // to satisfy the request. The actual size depends on a number of factors, and
+ // is typically (but not necessarily) the highest or second highest power of 2
+ // value less than or equal to `capacity`.
+ //
+ // The 'allocated size' includes a small amount of overhead required for
+ // internal state, which is currently 13 bytes on 64-bit platforms. For
+  // example: a buffer created with `block_size` and `capacity` set to 8KiB
+ // will have an allocated size of 8KiB, and an effective internal `capacity`
+ // of 8KiB - 13 = 8179 bytes.
+ //
+ // To demonstrate this in practice, let's assume we want to read data from
+ // somewhat larger files using approximately 64KiB buffers:
+ //
+ // y_absl::Cord ReadFromFile(int fd, size_t n) {
+ // y_absl::Cord cord;
+ // while (n > 0) {
+ // CordBuffer buffer = CordBuffer::CreateWithCustomLimit(64 << 10, n);
+ // y_absl::Span<char> data = buffer.available_up_to(n);
+ // ReadFileDataOrDie(fd, data.data(), data.size());
+ // buffer.IncreaseLengthBy(data.size());
+ // cord.Append(std::move(buffer));
+ // n -= data.size();
+ // }
+ // return cord;
+ // }
+ //
+  // If we use this function to read a file of 659KiB, we might get the
+ // following pattern of allocated cord buffer sizes:
+ //
+ // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523)
+ // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523)
+ // ...
+ // CreateWithCustomLimit(64KiB, 19586) --> ~16KiB (16371)
+ // CreateWithCustomLimit(64KiB, 3215) --> 3215 (at least 3215)
+ //
+ // The reason the method returns a 16K buffer instead of a roughly 19K buffer
+ // is to reduce memory overhead and fragmentation risks. Using carefully
+ // chosen power of 2 values reduces the entropy of allocated memory sizes.
+ //
+  // Additionally, let's assume we use the above function on files that are
+  // generally smaller than 64K. If we used 'precise' sized buffers for such
+  // files, then we would get a very wide distribution of allocated memory sizes
+  // rounded to 4K page sizes, and we would end up with a lot of unused capacity.
+ //
+  // In general, applications should only use custom sizes if the data they are
+ // consuming or storing is expected to be many times the chosen block size,
+ // and be based on objective data and performance metrics. For example, a
+ // compress function may work faster and consume less CPU when using larger
+ // buffers. Such an application should pick a size offering a reasonable
+ // trade-off between expected data size, compute savings with larger buffers,
+ // and the cost or fragmentation effect of larger buffers.
+ // Applications must pick a reasonable spot on that curve, and make sure their
+ // data meets their expectations in size distributions such as "mostly large".
+ static CordBuffer CreateWithCustomLimit(size_t block_size, size_t capacity);
+
+ // CordBuffer::available()
+ //
+ // Returns the span delineating the available capacity in this buffer
+ // which is defined as `{ data() + length(), capacity() - length() }`.
+ y_absl::Span<char> available();
+
+ // CordBuffer::available_up_to()
+ //
+ // Returns the span delineating the available capacity in this buffer limited
+ // to `size` bytes. This is equivalent to `available().subspan(0, size)`.
+ y_absl::Span<char> available_up_to(size_t size);
+
+ // CordBuffer::data()
+ //
+ // Returns a non-null reference to the data managed by this instance.
+ // Applications are allowed to write up to `capacity` bytes of instance data.
+ // CordBuffer data is uninitialized by default. Reading data from an instance
+ // that has not yet been initialized will lead to undefined behavior.
+ char* data();
+ const char* data() const;
+
+ // CordBuffer::length()
+ //
+ // Returns the length of this instance. The default length of a CordBuffer is
+ // 0, indicating an 'empty' CordBuffer. Applications must specify the length
+ // of the data in a CordBuffer before adding it to a Cord.
+ size_t length() const;
+
+ // CordBuffer::capacity()
+ //
+ // Returns the capacity of this instance. All instances have a non-zero
+ // capacity: default and `moved from` instances have a small internal buffer.
+ size_t capacity() const;
+
+ // CordBuffer::IncreaseLengthBy()
+ //
+ // Increases the length of this buffer by the specified 'n' bytes.
+ // Applications must make sure all data in this buffer up to the new length
+ // has been initialized before adding a CordBuffer to a Cord: failure to do so
+ // will lead to undefined behavior. Requires `length() + n <= capacity()`.
+  // Typically, applications will use `available_up_to()` to get a span of the
+ // desired capacity, and use `span.size()` to increase the length as in:
+ // y_absl::Span<char> span = buffer.available_up_to(desired);
+ // buffer.IncreaseLengthBy(span.size());
+ // memcpy(span.data(), src, span.size());
+ // etc...
+ void IncreaseLengthBy(size_t n);
+
+ // CordBuffer::SetLength()
+ //
+ // Sets the data length of this instance. Applications must make sure all data
+ // of the specified length has been initialized before adding a CordBuffer to
+ // a Cord: failure to do so will lead to undefined behavior.
+ // Setting the length to a small value or zero does not release any memory
+ // held by this CordBuffer instance. Requires `length <= capacity()`.
+ // Applications should preferably use the `IncreaseLengthBy()` method above
+  // in combination with the `available()` or `available_up_to()` methods.
+ void SetLength(size_t length);
+
+ private:
+  // Make sure we don't accidentally over-promise.
+ static_assert(kCustomLimit <= cord_internal::kMaxLargeFlatSize, "");
+
+ // Assume the cost of an 'uprounded' allocation to CeilPow2(size) versus
+ // the cost of allocating at least 1 extra flat <= 4KB:
+ // - Flat overhead = 13 bytes
+ // - Btree amortized cost / node =~ 13 bytes
+ // - 64 byte granularity of tcmalloc at 4K =~ 32 byte average
+  // For CPU cost and efficiency we should at least 'save' something by
+  // splitting; as a poor man's measure, we require the slop to be at least
+  // double the cost offset to make splitting worthwhile: ~128 bytes.
+ static constexpr size_t kMaxPageSlop = 128;
+
+  // Overhead for allocating a flat.
+ static constexpr size_t kOverhead = cord_internal::kFlatOverhead;
+
+ using CordRepFlat = cord_internal::CordRepFlat;
+
+ // `Rep` is the internal data representation of a CordBuffer. The internal
+ // representation has an internal small size optimization similar to
+ // TString (SSO).
+ struct Rep {
+ // Inline SSO size of a CordBuffer
+ static constexpr size_t kInlineCapacity = sizeof(intptr_t) * 2 - 1;
+
+ // Creates a default instance with kInlineCapacity.
+ Rep() : short_rep{} {}
+
+    // Creates an instance managing an allocated, non-null CordRepFlat.
+ explicit Rep(cord_internal::CordRepFlat* rep) : long_rep{rep} {
+ assert(rep != nullptr);
+ }
+
+ // Returns true if this instance manages the SSO internal buffer.
+ bool is_short() const {
+ constexpr size_t offset = offsetof(Short, raw_size);
+ return (reinterpret_cast<const char*>(this)[offset] & 1) != 0;
+ }
+
+ // Returns the available area of the internal SSO data
+ y_absl::Span<char> short_available() {
+ assert(is_short());
+ const size_t length = (short_rep.raw_size >> 1);
+ return y_absl::Span<char>(short_rep.data + length,
+ kInlineCapacity - length);
+ }
+
+    // Returns the available area of the allocated CordRepFlat data
+ y_absl::Span<char> long_available() {
+ assert(!is_short());
+ const size_t length = long_rep.rep->length;
+ return y_absl::Span<char>(long_rep.rep->Data() + length,
+ long_rep.rep->Capacity() - length);
+ }
+
+ // Returns the length of the internal SSO data.
+ size_t short_length() const {
+ assert(is_short());
+ return short_rep.raw_size >> 1;
+ }
+
+ // Sets the length of the internal SSO data.
+ // Disregards any previously set CordRep instance.
+ void set_short_length(size_t length) {
+ short_rep.raw_size = static_cast<char>((length << 1) + 1);
+ }
+
+ // Adds `n` to the current short length.
+ void add_short_length(size_t n) {
+ assert(is_short());
+ short_rep.raw_size += static_cast<char>(n << 1);
+ }
+
+ // Returns reference to the internal SSO data buffer.
+ char* data() {
+ assert(is_short());
+ return short_rep.data;
+ }
+ const char* data() const {
+ assert(is_short());
+ return short_rep.data;
+ }
+
+    // Returns a pointer to the allocated CordRepFlat managed by this instance.
+ cord_internal::CordRepFlat* rep() const {
+ assert(!is_short());
+ return long_rep.rep;
+ }
+
+ // The internal representation takes advantage of the fact that allocated
+ // memory is always on an even address, and uses the least significant bit
+ // of the first or last byte (depending on endianness) as the inline size
+ // indicator overlapping with the least significant byte of the CordRep*.
+#if defined(Y_ABSL_IS_BIG_ENDIAN)
+ struct Long {
+ explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
+ void* padding;
+ cord_internal::CordRepFlat* rep;
+ };
+ struct Short {
+ char data[sizeof(Long) - 1];
+ char raw_size = 1;
+ };
+#else
+ struct Long {
+ explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
+ cord_internal::CordRepFlat* rep;
+ void* padding;
+ };
+ struct Short {
+ char raw_size = 1;
+ char data[sizeof(Long) - 1];
+ };
+#endif
+
+ union {
+ Long long_rep;
+ Short short_rep;
+ };
+ };
+
+ // Power2 functions
+ static bool IsPow2(size_t size) { return y_absl::has_single_bit(size); }
+ static size_t Log2Floor(size_t size) { return y_absl::bit_width(size) - 1; }
+ static size_t Log2Ceil(size_t size) { return y_absl::bit_width(size - 1); }
+
+ // Implementation of `CreateWithCustomLimit()`.
+ // This implementation allows for future memory allocation hints to
+ // be passed down into the CordRepFlat allocation function.
+ template <typename... AllocationHints>
+ static CordBuffer CreateWithCustomLimitImpl(size_t block_size,
+ size_t capacity,
+ AllocationHints... hints);
+
+ // Consumes the value contained in this instance and resets the instance.
+  // This method returns a non-null CordRep* if the current instance manages a
+  // CordRep*, and resets the instance to an empty SSO instance. If the current
+ // instance is an SSO instance, then this method returns nullptr and sets
+ // `short_value` to the inlined data value. In either case, the current
+ // instance length is reset to zero.
+ // This method is intended to be used by Cord internal functions only.
+ cord_internal::CordRep* ConsumeValue(y_absl::string_view& short_value) {
+ cord_internal::CordRep* rep = nullptr;
+ if (rep_.is_short()) {
+ short_value = y_absl::string_view(rep_.data(), rep_.short_length());
+ } else {
+ rep = rep_.rep();
+ }
+ rep_.set_short_length(0);
+ return rep;
+ }
+
+ // Internal constructor.
+ explicit CordBuffer(cord_internal::CordRepFlat* rep) : rep_(rep) {
+ assert(rep != nullptr);
+ }
+
+ Rep rep_;
+
+ friend class Cord;
+ friend class CordBufferTestPeer;
+};
+
+inline constexpr size_t CordBuffer::MaximumPayload() {
+ return cord_internal::kMaxFlatLength;
+}
+
+inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
+ // TODO(y_absl-team): Use std::min when C++11 support is dropped.
+ return (kCustomLimit < block_size ? kCustomLimit : block_size) -
+ cord_internal::kFlatOverhead;
+}
+
+inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
+ if (capacity > Rep::kInlineCapacity) {
+ auto* rep = cord_internal::CordRepFlat::New(capacity);
+ rep->length = 0;
+ return CordBuffer(rep);
+ }
+ return CordBuffer();
+}
+
+template <typename... AllocationHints>
+inline CordBuffer CordBuffer::CreateWithCustomLimitImpl(
+ size_t block_size, size_t capacity, AllocationHints... hints) {
+ assert(IsPow2(block_size));
+ capacity = (std::min)(capacity, kCustomLimit);
+ block_size = (std::min)(block_size, kCustomLimit);
+ if (capacity + kOverhead >= block_size) {
+ capacity = block_size;
+ } else if (capacity <= kDefaultLimit) {
+ capacity = capacity + kOverhead;
+ } else if (!IsPow2(capacity)) {
+ // Check if rounded up to next power 2 is a good enough fit
+ // with limited waste making it an acceptable direct fit.
+ const size_t rounded_up = size_t{1} << Log2Ceil(capacity);
+ const size_t slop = rounded_up - capacity;
+ if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) {
+ capacity = rounded_up;
+ } else {
+ // Round down to highest power of 2 <= capacity.
+ // Consider a more aggressive step down if that may reduce the
+ // risk of fragmentation where 'people are holding it wrong'.
+ const size_t rounded_down = size_t{1} << Log2Floor(capacity);
+ capacity = rounded_down;
+ }
+ }
+ const size_t length = capacity - kOverhead;
+ auto* rep = CordRepFlat::New(CordRepFlat::Large(), length, hints...);
+ rep->length = 0;
+ return CordBuffer(rep);
+}
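
A worked trace of the rounding above, using the 19586-byte request from the class comment (assumes kOverhead == 13 and kDefaultLimit of ~4083 bytes, as on typical 64-bit builds):

    //   block_size = 65536, capacity = 19586
    //   capacity + kOverhead = 19599 <  block_size      -> no direct block fit
    //   capacity             = 19586 >  kDefaultLimit   -> power-of-2 sizing
    //   rounded_up   = 1 << Log2Ceil(19586)  = 32768
    //   slop         = 32768 - 19586 = 13182 > kMaxPageSlop + kOverhead (141)
    //   rounded_down = 1 << Log2Floor(19586) = 16384    -> capacity = 16384
    //   length = capacity - kOverhead = 16371 usable bytes, matching the
    //   "~16KiB (16371)" line in the CreateWithCustomLimit() documentation.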
+
+inline CordBuffer CordBuffer::CreateWithCustomLimit(size_t block_size,
+ size_t capacity) {
+ return CreateWithCustomLimitImpl(block_size, capacity);
+}
+
+inline CordBuffer::~CordBuffer() {
+ if (!rep_.is_short()) {
+ cord_internal::CordRepFlat::Delete(rep_.rep());
+ }
+}
+
+inline CordBuffer::CordBuffer(CordBuffer&& rhs) noexcept : rep_(rhs.rep_) {
+ rhs.rep_.set_short_length(0);
+}
+
+inline CordBuffer& CordBuffer::operator=(CordBuffer&& rhs) noexcept {
+ if (!rep_.is_short()) cord_internal::CordRepFlat::Delete(rep_.rep());
+ rep_ = rhs.rep_;
+ rhs.rep_.set_short_length(0);
+ return *this;
+}
+
+inline y_absl::Span<char> CordBuffer::available() {
+ return rep_.is_short() ? rep_.short_available() : rep_.long_available();
+}
+
+inline y_absl::Span<char> CordBuffer::available_up_to(size_t size) {
+ return available().subspan(0, size);
+}
+
+inline char* CordBuffer::data() {
+ return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
+}
+
+inline const char* CordBuffer::data() const {
+ return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
+}
+
+inline size_t CordBuffer::capacity() const {
+ return rep_.is_short() ? Rep::kInlineCapacity : rep_.rep()->Capacity();
+}
+
+inline size_t CordBuffer::length() const {
+ return rep_.is_short() ? rep_.short_length() : rep_.rep()->length;
+}
+
+inline void CordBuffer::SetLength(size_t length) {
+ Y_ABSL_HARDENING_ASSERT(length <= capacity());
+ if (rep_.is_short()) {
+ rep_.set_short_length(length);
+ } else {
+ rep_.rep()->length = length;
+ }
+}
+
+inline void CordBuffer::IncreaseLengthBy(size_t n) {
+ Y_ABSL_HARDENING_ASSERT(n <= capacity() && length() + n <= capacity());
+ if (rep_.is_short()) {
+ rep_.add_short_length(n);
+ } else {
+ rep_.rep()->length += n;
+ }
+}
+
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_STRINGS_CORD_BUFFER_H_
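
A small sketch of the payload arithmetic documented above (the 13-byte flat overhead is an assumption that holds on typical 64-bit builds):

    #include <cstdio>
    #include "y_absl/strings/cord_buffer.h"

    void PrintPayloads() {
      // Guaranteed payload of a default-limit buffer (kMaxFlatLength, ~4083).
      std::printf("default payload: %zu\n",
                  y_absl::CordBuffer::MaximumPayload());
      // Payload of a 64 KiB custom-limit buffer: 64 KiB minus the flat
      // overhead, i.e. 65523, matching the numbers in the example above.
      std::printf("64KiB payload:   %zu\n",
                  y_absl::CordBuffer::MaximumPayload(64 << 10));
    }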
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_data_edge.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_data_edge.h
new file mode 100644
index 0000000000..afdaa3c8c3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_data_edge.h
@@ -0,0 +1,63 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef Y_ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
+#define Y_ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_internal.h"
+#include "y_absl/strings/internal/cord_rep_flat.h"
+#include "y_absl/strings/string_view.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node
+// holding a FLAT or EXTERNAL child rep. Requires `rep != nullptr`.
+inline bool IsDataEdge(const CordRep* edge) {
+ assert(edge != nullptr);
+
+ // The fast path is that `edge` is an EXTERNAL or FLAT node, making the below
+ // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL
+ // check in the slow path of the SUBSTRING check to optimize for the hot path.
+ if (edge->tag == EXTERNAL || edge->tag >= FLAT) return true;
+ if (edge->tag == SUBSTRING) edge = edge->substring()->child;
+ return edge->tag == EXTERNAL || edge->tag >= FLAT;
+}
+
+// Returns the `y_absl::string_view` data reference for the provided data edge.
+// Requires `IsDataEdge(edge) == true`.
+inline y_absl::string_view EdgeData(const CordRep* edge) {
+ assert(IsDataEdge(edge));
+
+ size_t offset = 0;
+ const size_t length = edge->length;
+ if (edge->IsSubstring()) {
+ offset = edge->substring()->start;
+ edge = edge->substring()->child;
+ }
+ return edge->tag >= FLAT
+ ? y_absl::string_view{edge->flat()->Data() + offset, length}
+ : y_absl::string_view{edge->external()->base + offset, length};
+}
+
+} // namespace cord_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
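
A minimal sketch of these helpers against internal reps (internal API, test-style usage only):

    #include <cassert>
    #include <cstring>
    #include "y_absl/strings/internal/cord_data_edge.h"
    #include "y_absl/strings/internal/cord_rep_flat.h"

    void DataEdgeSketch() {
      using y_absl::cord_internal::CordRep;
      using y_absl::cord_internal::CordRepFlat;
      using y_absl::cord_internal::CordRepSubstring;

      CordRepFlat* flat = CordRepFlat::New(16);
      flat->length = 5;
      memcpy(flat->Data(), "hello", 5);
      assert(y_absl::cord_internal::IsDataEdge(flat));
      assert(y_absl::cord_internal::EdgeData(flat) == "hello");

      // A SUBSTRING of a flat is also a data edge; EdgeData() resolves the
      // substring offset down to the underlying flat.
      CordRep* sub = CordRepSubstring::Substring(flat, 1, 3);  // takes its own ref
      assert(y_absl::cord_internal::EdgeData(sub) == "ell");

      CordRep::Unref(sub);
      CordRep::Unref(flat);
    }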
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
index 75cd0c044e..22a890c232 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
@@ -17,69 +17,57 @@
#include <cassert>
#include <memory>
+#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/container/inlined_vector.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_crc.h"
#include "y_absl/strings/internal/cord_rep_flat.h"
#include "y_absl/strings/internal/cord_rep_ring.h"
+#include "y_absl/strings/str_cat.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-Y_ABSL_CONST_INIT std::atomic<bool> cord_btree_enabled(kCordEnableBtreeDefault);
Y_ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
kCordEnableRingBufferDefault);
Y_ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
kCordShallowSubcordsDefault);
Y_ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
+void LogFatalNodeType(CordRep* rep) {
+ Y_ABSL_INTERNAL_LOG(FATAL, y_absl::StrCat("Unexpected node type: ",
+ static_cast<int>(rep->tag)));
+}
+
void CordRep::Destroy(CordRep* rep) {
assert(rep != nullptr);
- y_absl::InlinedVector<CordRep*, Constants::kInlinedVectorSize> pending;
while (true) {
assert(!rep->refcount.IsImmortal());
- if (rep->tag == CONCAT) {
- CordRepConcat* rep_concat = rep->concat();
- CordRep* right = rep_concat->right;
- if (!right->refcount.Decrement()) {
- pending.push_back(right);
- }
- CordRep* left = rep_concat->left;
- delete rep_concat;
- rep = nullptr;
- if (!left->refcount.Decrement()) {
- rep = left;
- continue;
- }
- } else if (rep->tag == BTREE) {
+ if (rep->tag == BTREE) {
CordRepBtree::Destroy(rep->btree());
- rep = nullptr;
+ return;
} else if (rep->tag == RING) {
CordRepRing::Destroy(rep->ring());
- rep = nullptr;
+ return;
} else if (rep->tag == EXTERNAL) {
CordRepExternal::Delete(rep);
- rep = nullptr;
+ return;
} else if (rep->tag == SUBSTRING) {
CordRepSubstring* rep_substring = rep->substring();
- CordRep* child = rep_substring->child;
+ rep = rep_substring->child;
delete rep_substring;
- rep = nullptr;
- if (!child->refcount.Decrement()) {
- rep = child;
- continue;
+ if (rep->refcount.Decrement()) {
+ return;
}
+ } else if (rep->tag == CRC) {
+ CordRepCrc::Destroy(rep->crc());
+ return;
} else {
+ assert(rep->IsFlat());
CordRepFlat::Delete(rep);
- rep = nullptr;
- }
-
- if (!pending.empty()) {
- rep = pending.back();
- pending.pop_back();
- } else {
- break;
+ return;
}
}
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
index 80d02395f8..87201b7fb0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
@@ -21,6 +21,7 @@
#include <cstdint>
#include <type_traits>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/endian.h"
#include "y_absl/base/internal/invoke.h"
@@ -33,16 +34,27 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace cord_internal {
+// The overhead of a vtable is too much for Cord, so we roll our own subclasses
+// using only a single byte to differentiate classes from each other - the "tag"
+// byte. Define the subclasses first so we can provide downcasting helper
+// functions in the base class.
+struct CordRep;
+struct CordRepConcat;
+struct CordRepExternal;
+struct CordRepFlat;
+struct CordRepSubstring;
+struct CordRepCrc;
+class CordRepRing;
+class CordRepBtree;
+
class CordzInfo;
// Default feature enable states for cord ring buffers
enum CordFeatureDefaults {
- kCordEnableBtreeDefault = true,
kCordEnableRingBufferDefault = false,
kCordShallowSubcordsDefault = false
};
-extern std::atomic<bool> cord_btree_enabled;
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;
@@ -52,10 +64,6 @@ extern std::atomic<bool> shallow_subcords_enabled;
// O(n^2) complexity as recursive / full tree validation is O(n).
extern std::atomic<bool> cord_btree_exhaustive_validation;
-inline void enable_cord_btree(bool enable) {
- cord_btree_enabled.store(enable, std::memory_order_relaxed);
-}
-
inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
@@ -80,6 +88,9 @@ enum Constants {
kMaxBytesToCopy = 511
};
+// Emits a fatal error "Unexpected node type: xyz" and aborts the program.
+Y_ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep);
+
// Compact class for tracking the reference count and state flags for CordRep
// instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
@@ -87,9 +98,6 @@ class RefcountAndFlags {
constexpr RefcountAndFlags() : count_{kRefIncrement} {}
struct Immortal {};
explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
- struct WithCrc {};
- explicit constexpr RefcountAndFlags(WithCrc)
- : count_(kCrcFlag | kRefIncrement) {}
// Increments the reference count. Imposes no memory ordering.
inline void Increment() {
@@ -125,32 +133,14 @@ class RefcountAndFlags {
return count_.load(std::memory_order_acquire) >> kNumFlags;
}
- // Returns true if the referenced object carries a CRC value.
- bool HasCrc() const {
- return (count_.load(std::memory_order_relaxed) & kCrcFlag) != 0;
- }
-
- // Returns true iff the atomic integer is 1 and this node does not store
- // a CRC. When both these conditions are met, the current thread owns
- // the reference and no other thread shares it, so its contents may be
- // safely mutated.
- //
- // If the referenced item is shared, carries a CRC, or is immortal,
- // it should not be modified in-place, and this function returns false.
- //
- // This call performs the memory barrier needed for the owning thread
- // to act on the object, so that if it returns true, it may safely
- // assume exclusive access to the object.
- inline bool IsMutable() {
- return (count_.load(std::memory_order_acquire)) == kRefIncrement;
- }
-
- // Returns whether the atomic integer is 1. Similar to IsMutable(),
- // but does not check for a stored CRC. (An unshared node with a CRC is not
- // mutable, because changing its data would invalidate the CRC.)
- //
- // When this returns true, there are no other references, and data sinks
- // may safely adopt the children of the CordRep.
+ // Returns whether the atomic integer is 1.
+ // If the reference count is used in the conventional way, a
+ // reference count of 1 implies that the current thread owns the
+ // reference and no other thread shares it.
+ // This call performs the test for a reference count of one, and
+ // performs the memory barrier needed for the owning thread
+ // to act on the object, knowing that it has exclusive access to the
+ // object. Always returns false when the immortal bit is set.
inline bool IsOne() {
return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
kRefIncrement;
@@ -166,51 +156,43 @@ class RefcountAndFlags {
// used for the StringConstant constructor to avoid collecting immutable
// constant cords.
// kReservedFlag is reserved for future use.
- enum {
+ enum Flags {
kNumFlags = 2,
kImmortalFlag = 0x1,
- kCrcFlag = 0x2,
+ kReservedFlag = 0x2,
kRefIncrement = (1 << kNumFlags),
// Bitmask to use when checking refcount by equality. This masks out
// all flags except kImmortalFlag, which is part of the refcount for
// purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
// if the immortal bit is set.)
- kRefcountMask = ~kCrcFlag,
+ kRefcountMask = ~kReservedFlag,
};
std::atomic<int32_t> count_;
};
-// The overhead of a vtable is too much for Cord, so we roll our own subclasses
-// using only a single byte to differentiate classes from each other - the "tag"
-// byte. Define the subclasses first so we can provide downcasting helper
-// functions in the base class.
-
-struct CordRepConcat;
-struct CordRepExternal;
-struct CordRepFlat;
-struct CordRepSubstring;
-class CordRepRing;
-class CordRepBtree;
-
// Various representations that we allow
enum CordRepKind {
- CONCAT = 0,
+ UNUSED_0 = 0,
SUBSTRING = 1,
- BTREE = 2,
- RING = 3,
- EXTERNAL = 4,
+ CRC = 2,
+ BTREE = 3,
+ RING = 4,
+ EXTERNAL = 5,
// We have different tags for different sized flat arrays,
- // starting with FLAT, and limited to MAX_FLAT_TAG. The 225 value is based on
- // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed
- // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well
- // as the Tag <---> Size logic so that FLAT stil represents the minimum flat
- // allocation size. (32 bytes as of now).
- FLAT = 5,
- MAX_FLAT_TAG = 225
+ // starting with FLAT, and limited to MAX_FLAT_TAG. The below values map to an
+ // allocated range of 32 bytes to 256 KB. The current granularity is:
+ // - 8 byte granularity for flat sizes in [32 - 512]
+ // - 64 byte granularity for flat sizes in (512 - 8KiB]
+ // - 4KiB byte granularity for flat sizes in (8KiB, 256 KiB]
+ // If a new tag is needed in the future, then 'FLAT' and 'MAX_FLAT_TAG' should
+ // be adjusted as well as the Tag <---> Size mapping logic so that FLAT still
+ // represents the minimum flat allocation size. (32 bytes as of now).
+ FLAT = 6,
+ MAX_FLAT_TAG = 248
};
// There are various locations where we want to check if some rep is a 'plain'
static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL not consecutive");
static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
struct CordRep {
+ // Result from an `extract edge` operation. Contains the (possibly changed)
+ // tree node as well as the extracted edge, or {tree, nullptr} if no edge
+ // could be extracted.
+ // On success, the returned `tree` value is null if `extracted` was the only
+ // data edge inside the tree, a data edge if there were only two data edges in
+ // the tree, or the (possibly new / smaller) remaining tree with the extracted
+ // data edge removed.
+ struct ExtractResult {
+ CordRep* tree;
+ CordRep* extracted;
+ };
+
CordRep() = default;
constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
: length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
@@ -249,18 +243,18 @@ struct CordRep {
// Returns true if this instance's tag matches the requested type.
constexpr bool IsRing() const { return tag == RING; }
- constexpr bool IsConcat() const { return tag == CONCAT; }
constexpr bool IsSubstring() const { return tag == SUBSTRING; }
+ constexpr bool IsCrc() const { return tag == CRC; }
constexpr bool IsExternal() const { return tag == EXTERNAL; }
constexpr bool IsFlat() const { return tag >= FLAT; }
constexpr bool IsBtree() const { return tag == BTREE; }
inline CordRepRing* ring();
inline const CordRepRing* ring() const;
- inline CordRepConcat* concat();
- inline const CordRepConcat* concat() const;
inline CordRepSubstring* substring();
inline const CordRepSubstring* substring() const;
+ inline CordRepCrc* crc();
+ inline const CordRepCrc* crc() const;
inline CordRepExternal* external();
inline const CordRepExternal* external() const;
inline CordRepFlat* flat();
@@ -283,17 +277,23 @@ struct CordRep {
static inline void Unref(CordRep* rep);
};
-struct CordRepConcat : public CordRep {
- CordRep* left;
- CordRep* right;
-
- uint8_t depth() const { return storage[0]; }
- void set_depth(uint8_t depth) { storage[0] = depth; }
-};
-
struct CordRepSubstring : public CordRep {
size_t start; // Starting offset of substring in child
CordRep* child;
+
+ // Creates a substring on `child`, adopting a reference on `child`.
+ // Requires `child` to be either a flat or external node, and `pos` and `n` to
+  // form a non-empty partial sub-range of `child`, i.e.:
+ // `n > 0 && n < length && n + pos <= length`
+ static inline CordRepSubstring* Create(CordRep* child, size_t pos, size_t n);
+
+ // Creates a substring of `rep`. Does not adopt a reference on `rep`.
+ // Requires `IsDataEdge(rep) && n > 0 && pos + n <= rep->length`.
+ // If `n == rep->length` then this method returns `CordRep::Ref(rep)`
+ // If `rep` is a substring of a flat or external node, then this method will
+ // return a new substring of that flat or external node with `pos` adjusted
+ // with the original `start` position.
+ static inline CordRep* Substring(CordRep* rep, size_t pos, size_t n);
};
// Type for function pointer that will invoke the releaser function and also
@@ -357,6 +357,47 @@ struct CordRepExternalImpl
}
};
+inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos,
+ size_t n) {
+ assert(child != nullptr);
+ assert(n > 0);
+ assert(n < child->length);
+ assert(pos < child->length);
+ assert(n <= child->length - pos);
+
+ // TODO(b/217376272): Harden internal logic.
+ // Move to strategical places inside the Cord logic and make this an assert.
+ if (Y_ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) {
+ LogFatalNodeType(child);
+ }
+
+ CordRepSubstring* rep = new CordRepSubstring();
+ rep->length = n;
+ rep->tag = SUBSTRING;
+ rep->start = pos;
+ rep->child = child;
+ return rep;
+}
+
+inline CordRep* CordRepSubstring::Substring(CordRep* rep, size_t pos,
+ size_t n) {
+ assert(rep != nullptr);
+ assert(n != 0);
+ assert(pos < rep->length);
+ assert(n <= rep->length - pos);
+ if (n == rep->length) return CordRep::Ref(rep);
+ if (rep->IsSubstring()) {
+ pos += rep->substring()->start;
+ rep = rep->substring()->child;
+ }
+ CordRepSubstring* substr = new CordRepSubstring();
+ substr->length = n;
+ substr->tag = SUBSTRING;
+ substr->start = pos;
+ substr->child = CordRep::Ref(rep);
+ return substr;
+}
+
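
To summarize the ownership difference between the two helpers above (a sketch; `flat` stands for any FLAT or EXTERNAL node of length 10):

    //   CordRepSubstring::Create(flat, 2, 5)      adopts the caller's reference
    //                                             on `flat` (no extra Ref).
    //   CordRepSubstring::Substring(flat, 2, 5)   takes its own reference via
    //                                             CordRep::Ref(flat).
    //   CordRepSubstring::Substring(flat, 0, 10)  returns CordRep::Ref(flat)
    //                                             directly; no SUBSTRING node.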
inline void CordRepExternal::Delete(CordRep* rep) {
assert(rep != nullptr && rep->IsExternal());
auto* rep_external = static_cast<CordRepExternal*>(rep);
@@ -370,7 +411,8 @@ struct ConstInitExternalStorage {
};
template <typename Str>
-CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);
+Y_ABSL_CONST_INIT CordRepExternal
+ ConstInitExternalStorage<Str>::value(Str::value);
enum {
kMaxInline = 15,
@@ -456,8 +498,8 @@ class InlineData {
// Requires the current instance to hold a tree value.
CordzInfo* cordz_info() const {
assert(is_tree());
- intptr_t info =
- static_cast<intptr_t>(y_absl::big_endian::ToHost64(as_tree_.cordz_info));
+ intptr_t info = static_cast<intptr_t>(
+ y_absl::big_endian::ToHost64(static_cast<uint64_t>(as_tree_.cordz_info)));
assert(info & 1);
return reinterpret_cast<CordzInfo*>(info - 1);
}
@@ -467,8 +509,9 @@ class InlineData {
// Requires the current instance to hold a tree value.
void set_cordz_info(CordzInfo* cordz_info) {
assert(is_tree());
- intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
- as_tree_.cordz_info = y_absl::big_endian::FromHost64(info);
+ uintptr_t info = reinterpret_cast<uintptr_t>(cordz_info) | 1;
+ as_tree_.cordz_info =
+ static_cast<cordz_info_t>(y_absl::big_endian::FromHost64(info));
}
// Resets the current cordz_info to null / empty.
@@ -568,16 +611,6 @@ class InlineData {
static_assert(sizeof(InlineData) == kMaxInline + 1, "");
-inline CordRepConcat* CordRep::concat() {
- assert(IsConcat());
- return static_cast<CordRepConcat*>(this);
-}
-
-inline const CordRepConcat* CordRep::concat() const {
- assert(IsConcat());
- return static_cast<const CordRepConcat*>(this);
-}
-
inline CordRepSubstring* CordRep::substring() {
assert(IsSubstring());
return static_cast<CordRepSubstring*>(this);
@@ -599,7 +632,9 @@ inline const CordRepExternal* CordRep::external() const {
}
inline CordRep* CordRep::Ref(CordRep* rep) {
- assert(rep != nullptr);
+ // Y_ABSL_ASSUME is a workaround for
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105585
+ Y_ABSL_ASSUME(rep != nullptr);
rep->refcount.Increment();
return rep;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
index 5dcdab9cc5..e073876f8c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
@@ -22,6 +22,7 @@
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_consume.h"
#include "y_absl/strings/internal/cord_rep_flat.h"
@@ -32,7 +33,9 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-constexpr size_t CordRepBtree::kMaxCapacity; // NOLINT: needed for c++ < c++17
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordRepBtree::kMaxCapacity;
+#endif
namespace {
@@ -69,7 +72,7 @@ void DumpAll(const CordRep* rep, bool include_contents, std::ostream& stream,
// indentation and prefix / labels keeps us within roughly 80-100 wide.
constexpr size_t kMaxDataLength = 60;
stream << ", data = \""
- << CordRepBtree::EdgeData(r).substr(0, kMaxDataLength)
+ << EdgeData(r).substr(0, kMaxDataLength)
<< (r->length > kMaxDataLength ? "\"..." : "\"");
}
stream << '\n';
@@ -119,6 +122,7 @@ CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) {
rep = CordRep::Ref(substring->child);
CordRep::Unref(substring);
}
+ assert(rep->IsExternal() || rep->IsFlat());
CordRepSubstring* substring = new CordRepSubstring();
substring->length = n;
substring->tag = SUBSTRING;
@@ -149,7 +153,7 @@ inline CordRep* MakeSubstring(CordRep* rep, size_t offset) {
CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) {
assert(length > 0);
assert(length <= edge->length);
- assert(CordRepBtree::IsDataEdge(edge));
+ assert(IsDataEdge(edge));
if (length >= edge->length) return edge;
if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) {
@@ -190,24 +194,29 @@ inline void FastUnref(R* r, Fn&& fn) {
}
}
-// Deletes a leaf node data edge. Requires `rep` to be an EXTERNAL or FLAT
-// node, or a SUBSTRING of an EXTERNAL or FLAT node.
-void DeleteLeafEdge(CordRep* rep) {
- for (;;) {
+
+void DeleteSubstring(CordRepSubstring* substring) {
+ CordRep* rep = substring->child;
+ if (!rep->refcount.Decrement()) {
if (rep->tag >= FLAT) {
CordRepFlat::Delete(rep->flat());
- return;
- }
- if (rep->tag == EXTERNAL) {
+ } else {
+ assert(rep->tag == EXTERNAL);
CordRepExternal::Delete(rep->external());
- return;
}
- assert(rep->tag == SUBSTRING);
- CordRepSubstring* substring = rep->substring();
- rep = substring->child;
- assert(rep->tag == EXTERNAL || rep->tag >= FLAT);
- delete substring;
- if (rep->refcount.Decrement()) return;
+ }
+ delete substring;
+}
+
+// Deletes a leaf node data edge. Requires `IsDataEdge(rep)`.
+void DeleteLeafEdge(CordRep* rep) {
+ assert(IsDataEdge(rep));
+ if (rep->tag >= FLAT) {
+ CordRepFlat::Delete(rep->flat());
+ } else if (rep->tag == EXTERNAL) {
+ CordRepExternal::Delete(rep->external());
+ } else {
+ DeleteSubstring(rep->substring());
}
}
@@ -216,8 +225,8 @@ void DeleteLeafEdge(CordRep* rep) {
// propagate node changes up the stack.
template <EdgeType edge_type>
struct StackOperations {
- // Returns true if the node at 'depth' is mutable, i.e. has a refcount
- // of one, carries no CRC, and all of its parent nodes have a refcount of one.
+ // Returns true if the node at 'depth' is not shared, i.e. has a refcount
+ // of one and all of its parent nodes have a refcount of one.
inline bool owned(int depth) const { return depth < share_depth; }
// Returns the node at 'depth'.
@@ -228,11 +237,11 @@ struct StackOperations {
inline CordRepBtree* BuildStack(CordRepBtree* tree, int depth) {
assert(depth <= tree->height());
int current_depth = 0;
- while (current_depth < depth && tree->refcount.IsMutable()) {
+ while (current_depth < depth && tree->refcount.IsOne()) {
stack[current_depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
- share_depth = current_depth + (tree->refcount.IsMutable() ? 1 : 0);
+ share_depth = current_depth + (tree->refcount.IsOne() ? 1 : 0);
while (current_depth < depth) {
stack[current_depth++] = tree;
tree = tree->Edge(edge_type)->btree();
@@ -241,17 +250,17 @@ struct StackOperations {
}
// Builds a stack with the invariant that all nodes are private owned / not
- // shared and carry no CRC data. This is used in iterative updates where a
- // previous propagation guaranteed all nodes have this property.
+ // shared. This is used in iterative updates where a previous propagation
+ // guaranteed all nodes are owned / private.
inline void BuildOwnedStack(CordRepBtree* tree, int height) {
assert(height <= CordRepBtree::kMaxHeight);
int depth = 0;
while (depth < height) {
- assert(tree->refcount.IsMutable());
+ assert(tree->refcount.IsOne());
stack[depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
- assert(tree->refcount.IsMutable());
+ assert(tree->refcount.IsOne());
share_depth = depth + 1;
}
@@ -336,12 +345,12 @@ struct StackOperations {
return Unwind</*propagate=*/true>(tree, depth, length, result);
}
- // `share_depth` contains the depth at which the nodes in the stack cannot
- // be mutated. I.e., if the top most level is shared (i.e.:
- // `!refcount.IsMutable()`), then `share_depth` is 0. If the 2nd node
- // is shared (and implicitly all nodes below that) then `share_depth` is 1,
- // etc. A `share_depth` greater than the depth of the stack indicates that
- // none of the nodes in the stack are shared.
+ // `share_depth` contains the depth at which the nodes in the stack become
+ // shared. I.e., if the top most level is shared (i.e.: `!refcount.IsOne()`),
+ // then `share_depth` is 0. If the 2nd node is shared (and implicitly all
+ // nodes below that) then `share_depth` is 1, etc. A `share_depth` greater
+ // than the depth of the stack indicates that none of the nodes in the stack
+ // are shared.
int share_depth;
NodeStack stack;
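A worked example of the `share_depth` convention above, with hypothetical values for illustration:
  // Sketch, not part of the patch: suppose BuildStack() walks a tree of depth 2
  // whose root is uniquely referenced but whose child at depth 1 is shared.
  // Then share_depth == 1, so:
  //   owned(0) == true    // the root may be mutated in place
  //   owned(1) == false   // copy-on-write is required from depth 1 down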
@@ -372,19 +381,37 @@ void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) {
Dump(rep, y_absl::string_view(), false, stream);
}
-void CordRepBtree::DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end) {
- for (CordRep* edge : tree->Edges(begin, end)) {
- FastUnref(edge, DeleteLeafEdge);
+template <size_t size>
+static void DestroyTree(CordRepBtree* tree) {
+ for (CordRep* node : tree->Edges()) {
+ if (node->refcount.Decrement()) continue;
+ for (CordRep* edge : node->btree()->Edges()) {
+ if (edge->refcount.Decrement()) continue;
+ if (size == 1) {
+ DeleteLeafEdge(edge);
+ } else {
+ CordRepBtree::Destroy(edge->btree());
+ }
+ }
+ CordRepBtree::Delete(node->btree());
}
- Delete(tree);
+ CordRepBtree::Delete(tree);
}
-void CordRepBtree::DestroyNonLeaf(CordRepBtree* tree, size_t begin,
- size_t end) {
- for (CordRep* edge : tree->Edges(begin, end)) {
- FastUnref(edge->btree(), Destroy);
+void CordRepBtree::Destroy(CordRepBtree* tree) {
+ switch (tree->height()) {
+ case 0:
+ for (CordRep* edge : tree->Edges()) {
+ if (!edge->refcount.Decrement()) {
+ DeleteLeafEdge(edge);
+ }
+ }
+ return CordRepBtree::Delete(tree);
+ case 1:
+ return DestroyTree<1>(tree);
+ default:
+ return DestroyTree<2>(tree);
}
- Delete(tree);
}
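The rewritten Destroy() dispatches on height so DestroyTree<> can unwind two levels per call. A sketch of the equivalent, unoptimized recursion, using only the members referenced above; not part of the patch:
  void DestroyRecursive(CordRepBtree* tree) {
    for (CordRep* edge : tree->Edges()) {
      if (edge->refcount.Decrement()) continue;
      if (tree->height() == 0) {
        DeleteLeafEdge(edge);
      } else {
        DestroyRecursive(edge->btree());
      }
    }
    CordRepBtree::Delete(tree);
  }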
bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
@@ -773,7 +800,7 @@ CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) {
CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
CordRep* front = tree->Edge(tree->begin());
- if (tree->refcount.IsMutable()) {
+ if (tree->refcount.IsOne()) {
Unref(tree->Edges(tree->begin() + 1, tree->end()));
CordRepBtree::Delete(tree);
} else {
@@ -786,7 +813,7 @@ CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end,
size_t new_length) {
assert(end <= tree->end());
- if (tree->refcount.IsMutable()) {
+ if (tree->refcount.IsOne()) {
Unref(tree->Edges(end, tree->end()));
tree->set_end(end);
tree->length = new_length;
@@ -813,13 +840,13 @@ CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
size_t length = len - n;
int height = tree->height();
- bool is_mutable = tree->refcount.IsMutable();
+ bool is_mutable = tree->refcount.IsOne();
// Extract all top nodes which are reduced to size = 1
Position pos = tree->IndexOfLength(length);
while (pos.index == tree->begin()) {
CordRep* edge = ExtractFront(tree);
- is_mutable &= edge->refcount.IsMutable();
+ is_mutable &= edge->refcount.IsOne();
if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
tree = edge->btree();
pos = tree->IndexOfLength(length);
@@ -835,8 +862,8 @@ CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
length = pos.n;
while (length != edge->length) {
// ConsumeBeginTo guarantees `tree` is a clean, privately owned copy.
- assert(tree->refcount.IsMutable());
- const bool edge_is_mutable = edge->refcount.IsMutable();
+ assert(tree->refcount.IsOne());
+ const bool edge_is_mutable = edge->refcount.IsOne();
if (height-- == 0) {
tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
@@ -973,7 +1000,7 @@ char CordRepBtree::GetCharacter(size_t offset) const {
Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
// The inlined version in `GetAppendBuffer()` deals with all heights <= 3.
assert(height() >= 4);
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
// Build a stack of nodes we may potentially need to update if we find a
// non-shared FLAT with capacity at the leaf level.
@@ -982,13 +1009,13 @@ Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
CordRepBtree* stack[kMaxDepth];
for (int i = 0; i < depth; ++i) {
node = node->Edge(kBack)->btree();
- if (!node->refcount.IsMutable()) return {};
+ if (!node->refcount.IsOne()) return {};
stack[i] = node;
}
// Must be a privately owned, mutable flat.
CordRep* const edge = node->Edge(kBack);
- if (!edge->refcount.IsMutable() || edge->tag < FLAT) return {};
+ if (!edge->refcount.IsOne() || edge->tag < FLAT) return {};
// Must have capacity.
const size_t avail = edge->flat()->Capacity() - edge->length;
@@ -1123,6 +1150,79 @@ CordRepBtree* CordRepBtree::Rebuild(CordRepBtree* tree) {
return nullptr;
}
+CordRepBtree::ExtractResult CordRepBtree::ExtractAppendBuffer(
+ CordRepBtree* tree, size_t extra_capacity) {
+ int depth = 0;
+ NodeStack stack;
+
+ // Set up default 'no success' result which is {tree, nullptr}.
+ ExtractResult result;
+ result.tree = tree;
+ result.extracted = nullptr;
+
+ // Dive down the right side of the tree, making sure no edges are shared.
+ while (tree->height() > 0) {
+ if (!tree->refcount.IsOne()) return result;
+ stack[depth++] = tree;
+ tree = tree->Edge(kBack)->btree();
+ }
+ if (!tree->refcount.IsOne()) return result;
+
+ // Validate we ended on a non-shared flat.
+ CordRep* rep = tree->Edge(kBack);
+ if (!(rep->IsFlat() && rep->refcount.IsOne())) return result;
+
+ // Verify it has at least the requested extra capacity.
+ CordRepFlat* flat = rep->flat();
+ const size_t length = flat->length;
+ const size_t avail = flat->Capacity() - flat->length;
+ if (extra_capacity > avail) return result;
+
+ // Set the extracted flat in the result.
+ result.extracted = flat;
+
+ // Cascade-delete all nodes that become empty.
+ while (tree->size() == 1) {
+ CordRepBtree::Delete(tree);
+ if (--depth < 0) {
+ // We consumed the entire tree: return nullptr for new tree.
+ result.tree = nullptr;
+ return result;
+ }
+ rep = tree;
+ tree = stack[depth];
+ }
+
+ // Remove the edge, or the parent node we cascaded up to.
+ tree->set_end(tree->end() - 1);
+ tree->length -= length;
+
+ // Adjust lengths up the tree.
+ while (depth > 0) {
+ tree = stack[--depth];
+ tree->length -= length;
+ }
+
+ // Remove unnecessary top nodes with size = 1. This may iterate all the way
+ // down to the leaf node in which case we simply return the remaining last
+ // edge in that node and the extracted flat.
+ while (tree->size() == 1) {
+ int height = tree->height();
+ rep = tree->Edge(kBack);
+ Delete(tree);
+ if (height == 0) {
+ // We consumed the leaf: return the sole data edge as the new tree.
+ result.tree = rep;
+ return result;
+ }
+ tree = rep->btree();
+ }
+
+ // Done: return the (new) top level node and extracted flat.
+ result.tree = tree;
+ return result;
+}
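A hedged sketch of how a caller might consume the result, complementing the example in the header; `tree` and the capacity value are assumptions, not part of the patch:
  CordRepBtree::ExtractResult res =
      CordRepBtree::ExtractAppendBuffer(tree, /*extra_capacity=*/64);
  if (res.extracted != nullptr) {
    // `res.tree` may be nullptr (tree fully consumed), a bare data edge, or a
    // smaller btree; the caller owns both pointers.
  }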
+
} // namespace cord_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
index 8c78892520..c6d01fdda3 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
@@ -22,6 +22,7 @@
#include "y_absl/base/config.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/optimization.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_flat.h"
#include "y_absl/strings/string_view.h"
@@ -163,6 +164,9 @@ class CordRepBtree : public CordRep {
 // typically after a refcount.Decrement() on the last reference.
static void Destroy(CordRepBtree* tree);
+ // Destruction
+ static void Delete(CordRepBtree* tree) { delete tree; }
+
// Use CordRep::Unref() as we overload for y_absl::Span<CordRep* const>.
using CordRep::Unref;
@@ -240,11 +244,41 @@ class CordRepBtree : public CordRep {
// length of the flat node and involved tree nodes have been increased by
// `span.length()`. The caller is responsible for immediately assigning values
 // to all uninitialized data referenced by the returned span.
- // Requires `this->refcount.IsMutable()`: this function forces the
- // caller to do this fast path check on the top level node, as this is the
- // most commonly shared node of a cord tree.
+ // Requires `this->refcount.IsOne()`: this function forces the caller to do
+ // this fast path check on the top level node, as this is the most commonly
+ // shared node of a cord tree.
Span<char> GetAppendBuffer(size_t size);
+ // Extracts the right-most data edge from this tree iff:
+ // - the tree and all internal edges to the right-most node are not shared.
+ // - the right-most node is a FLAT node and not shared.
+ // - the right-most node has at least the desired extra capacity.
+ //
+ // Returns {tree, nullptr} if any of the above conditions are not met.
+ // This method effectively removes data from the tree. The intent of this
+ // method is to allow applications appending small string data to use
+ // pre-existing capacity, and add the modified rep back to the tree.
+ //
+ // Simplified such code would look similar to this:
+ // void MyTreeBuilder::Append(string_view data) {
+ // ExtractResult result = CordRepBtree::ExtractAppendBuffer(tree_, 1);
+ // if (CordRep* rep = result.extracted) {
+ //       size_t available = rep->flat()->Capacity() - rep->length;
+ //       size_t n = std::min(data.size(), available);
+ //       memcpy(rep->flat()->Data() + rep->length, data.data(), n);
+ // rep->length += n;
+ // data.remove_prefix(n);
+ // if (!result.tree->IsBtree()) {
+ // tree_ = CordRepBtree::Create(result.tree);
+ // }
+ // tree_ = CordRepBtree::Append(tree_, rep);
+ // }
+ // ...
+ // // Remaining edge in `result.tree`.
+ // }
+ static ExtractResult ExtractAppendBuffer(CordRepBtree* tree,
+ size_t extra_capacity = 1);
+
// Returns the `height` of the tree. The height of a tree is limited to
// kMaxHeight. `height` is implemented as an `int` as in some places we
// use negative (-1) values for 'data edges'.
@@ -277,13 +311,6 @@ class CordRepBtree : public CordRep {
// Requires this instance to be a leaf node, and `index` to be valid index.
inline y_absl::string_view Data(size_t index) const;
- static const char* EdgeDataPtr(const CordRep* r);
- static y_absl::string_view EdgeData(const CordRep* r);
-
- // Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node
- // holding a FLAT or EXTERNAL child rep.
- static bool IsDataEdge(const CordRep* rep);
-
// Diagnostics: returns true if `tree` is valid and internally consistent.
// If `shallow` is false, then the provided top level node and all child nodes
// below it are recursively checked. If `shallow` is true, only the provided
@@ -410,12 +437,6 @@ class CordRepBtree : public CordRep {
// Requires `offset` < length.
Position IndexBeyond(size_t offset) const;
- // Destruction
- static void DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end);
- static void DestroyNonLeaf(CordRepBtree* tree, size_t begin, size_t end);
- static void DestroyTree(CordRepBtree* tree, size_t begin, size_t end);
- static void Delete(CordRepBtree* tree) { delete tree; }
-
// Creates a new leaf node containing as much data as possible from `data`.
// The data is added either forwards or reversed depending on `edge_type`.
// Callers must check the length of the returned node to determine if all data
@@ -604,34 +625,11 @@ inline y_absl::Span<CordRep* const> CordRepBtree::Edges(size_t begin,
return {edges_ + begin, static_cast<size_t>(end - begin)};
}
-inline const char* CordRepBtree::EdgeDataPtr(const CordRep* r) {
- assert(IsDataEdge(r));
- size_t offset = 0;
- if (r->tag == SUBSTRING) {
- offset = r->substring()->start;
- r = r->substring()->child;
- }
- return (r->tag >= FLAT ? r->flat()->Data() : r->external()->base) + offset;
-}
-
-inline y_absl::string_view CordRepBtree::EdgeData(const CordRep* r) {
- return y_absl::string_view(EdgeDataPtr(r), r->length);
-}
-
inline y_absl::string_view CordRepBtree::Data(size_t index) const {
assert(height() == 0);
return EdgeData(Edge(index));
}
-inline bool CordRepBtree::IsDataEdge(const CordRep* rep) {
- // The fast path is that `rep` is an EXTERNAL or FLAT node, making the below
- // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL
- // check in the slow path the SUBSTRING check to optimize for the hot path.
- if (rep->tag == EXTERNAL || rep->tag >= FLAT) return true;
- if (rep->tag == SUBSTRING) rep = rep->substring()->child;
- return rep->tag == EXTERNAL || rep->tag >= FLAT;
-}
-
inline CordRepBtree* CordRepBtree::New(int height) {
CordRepBtree* tree = new CordRepBtree;
tree->length = 0;
@@ -659,19 +657,6 @@ inline CordRepBtree* CordRepBtree::New(CordRepBtree* front,
return tree;
}
-inline void CordRepBtree::DestroyTree(CordRepBtree* tree, size_t begin,
- size_t end) {
- if (tree->height() == 0) {
- DestroyLeaf(tree, begin, end);
- } else {
- DestroyNonLeaf(tree, begin, end);
- }
-}
-
-inline void CordRepBtree::Destroy(CordRepBtree* tree) {
- DestroyTree(tree, tree->begin(), tree->end());
-}
-
inline void CordRepBtree::Unref(y_absl::Span<CordRep* const> edges) {
for (CordRep* edge : edges) {
if (Y_ABSL_PREDICT_FALSE(!edge->refcount.Decrement())) {
@@ -731,7 +716,7 @@ inline void CordRepBtree::AlignBegin() {
// size, and then do overlapping load/store of up to 4 pointers (inlined as
// XMM, YMM or ZMM load/store) and up to 2 pointers (XMM / YMM), which is a)
// compact and b) not clobbering any registers.
- Y_ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity);
+ Y_ABSL_ASSUME(new_end <= kMaxCapacity);
#ifdef __clang__
#pragma unroll 1
#endif
@@ -749,7 +734,7 @@ inline void CordRepBtree::AlignEnd() {
const size_t new_end = end() + delta;
set_begin(new_begin);
set_end(new_end);
- Y_ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity);
+ Y_ABSL_ASSUME(new_end <= kMaxCapacity);
#ifdef __clang__
#pragma unroll 1
#endif
@@ -849,7 +834,7 @@ inline CordRepBtree* CordRepBtree::Create(CordRep* rep) {
}
inline Span<char> CordRepBtree::GetAppendBuffer(size_t size) {
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
CordRepBtree* tree = this;
const int height = this->height();
CordRepBtree* n1 = tree;
@@ -858,21 +843,21 @@ inline Span<char> CordRepBtree::GetAppendBuffer(size_t size) {
switch (height) {
case 3:
tree = tree->Edge(kBack)->btree();
- if (!tree->refcount.IsMutable()) return {};
+ if (!tree->refcount.IsOne()) return {};
n2 = tree;
Y_ABSL_FALLTHROUGH_INTENDED;
case 2:
tree = tree->Edge(kBack)->btree();
- if (!tree->refcount.IsMutable()) return {};
+ if (!tree->refcount.IsOne()) return {};
n1 = tree;
Y_ABSL_FALLTHROUGH_INTENDED;
case 1:
tree = tree->Edge(kBack)->btree();
- if (!tree->refcount.IsMutable()) return {};
+ if (!tree->refcount.IsOne()) return {};
Y_ABSL_FALLTHROUGH_INTENDED;
case 0:
CordRep* edge = tree->Edge(kBack);
- if (!edge->refcount.IsMutable()) return {};
+ if (!edge->refcount.IsOne()) return {};
if (edge->tag < FLAT) return {};
size_t avail = edge->flat()->Capacity() - edge->length;
if (avail == 0) return {};
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc
index a94d4e0ae6..861a3c034b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.cc
@@ -16,6 +16,7 @@
#include <cassert>
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
@@ -39,7 +40,7 @@ inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
assert(n <= rep->length);
assert(offset < rep->length);
assert(offset <= rep->length - n);
- assert(CordRepBtree::IsDataEdge(rep));
+ assert(IsDataEdge(rep));
if (n == 0) return nullptr;
if (n == rep->length) return CordRep::Ref(rep);
@@ -49,6 +50,7 @@ inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
rep = rep->substring()->child;
}
+ assert(rep->IsExternal() || rep->IsFlat());
CordRepSubstring* substring = new CordRepSubstring();
substring->length = n;
substring->tag = SUBSTRING;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h
index f4c09864e0..1508126e97 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_navigator.h
@@ -143,8 +143,8 @@ class CordRepBtreeNavigator {
// `index_` and `node_` contain the navigation state as the 'path' to the
// current data edge which is at `node_[0]->Edge(index_[0])`. The contents
// of these are undefined until the instance is initialized (`height_ >= 0`).
- uint8_t index_[CordRepBtree::kMaxHeight];
- CordRepBtree* node_[CordRepBtree::kMaxHeight];
+ uint8_t index_[CordRepBtree::kMaxDepth];
+ CordRepBtree* node_[CordRepBtree::kMaxDepth];
};
// Returns true if this instance is not empty.
@@ -173,6 +173,7 @@ template <CordRepBtree::EdgeType edge_type>
inline CordRep* CordRepBtreeNavigator::Init(CordRepBtree* tree) {
assert(tree != nullptr);
assert(tree->size() > 0);
+ assert(tree->height() <= CordRepBtree::kMaxHeight);
int height = height_ = tree->height();
size_t index = tree->index(edge_type);
node_[height] = tree;
@@ -206,6 +207,7 @@ inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::Seek(
inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::InitOffset(
CordRepBtree* tree, size_t offset) {
assert(tree != nullptr);
+ assert(tree->height() <= CordRepBtree::kMaxHeight);
if (Y_ABSL_PREDICT_FALSE(offset >= tree->length)) return {nullptr, 0};
height_ = tree->height();
node_[height_] = tree;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc
index 53010139e2..ee2ef490ea 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.cc
@@ -17,6 +17,7 @@
#include <cassert>
#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
#include "y_absl/strings/internal/cord_rep_btree_navigator.h"
@@ -44,7 +45,7 @@ y_absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
// can directly return the substring into the current data edge as the next
// chunk. We can easily establish from the above code that `navigator_.Next()`
// has not been called as that requires `chunk_size` to be zero.
- if (n < chunk_size) return CordRepBtree::EdgeData(edge).substr(result.n);
+ if (n < chunk_size) return EdgeData(edge).substr(result.n);
// The amount of data taken from the last edge is `chunk_size` and `result.n`
// contains the offset into the current edge trailing the read data (which can
@@ -60,7 +61,7 @@ y_absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
// We did not read all data, return remaining data from current edge.
edge = navigator_.Current();
remaining_ -= consumed_by_read + edge->length;
- return CordRepBtree::EdgeData(edge).substr(result.n);
+ return EdgeData(edge).substr(result.n);
}
} // namespace cord_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h
index d5473df29d..78d18ee7eb 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree_reader.h
@@ -18,6 +18,7 @@
#include <cassert>
#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_data_edge.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
#include "y_absl/strings/internal/cord_rep_btree_navigator.h"
@@ -167,7 +168,7 @@ inline y_absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) {
assert(tree != nullptr);
const CordRep* edge = navigator_.InitFirst(tree);
remaining_ = tree->length - edge->length;
- return CordRepBtree::EdgeData(edge);
+ return EdgeData(edge);
}
inline y_absl::string_view CordRepBtreeReader::Next() {
@@ -175,7 +176,7 @@ inline y_absl::string_view CordRepBtreeReader::Next() {
const CordRep* edge = navigator_.Next();
assert(edge != nullptr);
remaining_ -= edge->length;
- return CordRepBtree::EdgeData(edge);
+ return EdgeData(edge);
}
inline y_absl::string_view CordRepBtreeReader::Skip(size_t skip) {
@@ -190,7 +191,7 @@ inline y_absl::string_view CordRepBtreeReader::Skip(size_t skip) {
// The combined length of all edges skipped before `pos.edge` is `skip -
// pos.offset`, all of which are 'consumed', as well as the current edge.
remaining_ -= skip - pos.offset + pos.edge->length;
- return CordRepBtree::EdgeData(pos.edge).substr(pos.offset);
+ return EdgeData(pos.edge).substr(pos.offset);
}
inline y_absl::string_view CordRepBtreeReader::Seek(size_t offset) {
@@ -199,7 +200,7 @@ inline y_absl::string_view CordRepBtreeReader::Seek(size_t offset) {
remaining_ = 0;
return {};
}
- y_absl::string_view chunk = CordRepBtree::EdgeData(pos.edge).substr(pos.offset);
+ y_absl::string_view chunk = EdgeData(pos.edge).substr(pos.offset);
remaining_ = length() - offset - chunk.length();
return chunk;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
index 02a142d70a..a72823ece5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
@@ -40,88 +40,21 @@ CordRep* ClipSubstring(CordRepSubstring* substring) {
return child;
}
-// Unrefs the provided `concat`, and returns `{concat->left, concat->right}`
-// Adds or assumes a reference on `concat->left` and `concat->right`.
-// Returns an array of 2 elements containing the left and right nodes.
-std::array<CordRep*, 2> ClipConcat(CordRepConcat* concat) {
- std::array<CordRep*, 2> result{concat->left, concat->right};
- if (concat->refcount.IsOne()) {
- delete concat;
- } else {
- CordRep::Ref(result[0]);
- CordRep::Ref(result[1]);
- CordRep::Unref(concat);
- }
- return result;
-}
+} // namespace
-void Consume(bool forward, CordRep* rep, ConsumeFn consume_fn) {
+void Consume(CordRep* rep, ConsumeFn consume_fn) {
size_t offset = 0;
size_t length = rep->length;
- struct Entry {
- CordRep* rep;
- size_t offset;
- size_t length;
- };
- y_absl::InlinedVector<Entry, 40> stack;
-
- for (;;) {
- if (rep->tag == CONCAT) {
- std::array<CordRep*, 2> res = ClipConcat(rep->concat());
- CordRep* left = res[0];
- CordRep* right = res[1];
-
- if (left->length <= offset) {
- // Don't need left node
- offset -= left->length;
- CordRep::Unref(left);
- rep = right;
- continue;
- }
- size_t length_left = left->length - offset;
- if (length_left >= length) {
- // Don't need right node
- CordRep::Unref(right);
- rep = left;
- continue;
- }
-
- // Need both nodes
- size_t length_right = length - length_left;
- if (forward) {
- stack.push_back({right, 0, length_right});
- rep = left;
- length = length_left;
- } else {
- stack.push_back({left, offset, length_left});
- rep = right;
- offset = 0;
- length = length_right;
- }
- } else if (rep->tag == SUBSTRING) {
- offset += rep->substring()->start;
- rep = ClipSubstring(rep->substring());
- } else {
- consume_fn(rep, offset, length);
- if (stack.empty()) return;
-
- rep = stack.back().rep;
- offset = stack.back().offset;
- length = stack.back().length;
- stack.pop_back();
- }
+ if (rep->tag == SUBSTRING) {
+ offset += rep->substring()->start;
+ rep = ClipSubstring(rep->substring());
}
-}
-
-} // namespace
-
-void Consume(CordRep* rep, ConsumeFn consume_fn) {
- return Consume(true, rep, std::move(consume_fn));
+ consume_fn(rep, offset, length);
}
void ReverseConsume(CordRep* rep, ConsumeFn consume_fn) {
- return Consume(false, rep, std::move(consume_fn));
+ return Consume(rep, std::move(consume_fn));
}
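With CONCAT handling gone, Consume() and ReverseConsume() visit at most one data-bearing rep. A minimal caller sketch, assuming `rep` is an owned CordRep* and that ConsumeFn is callable as shown by `consume_fn(rep, offset, length)` above; not part of the patch:
  Consume(rep, [](CordRep* child, size_t offset, size_t length) {
    // `child` is the (possibly clipped) underlying rep; offset/length select
    // the consumed range, and the callee adopts the reference passed in.
  });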
} // namespace cord_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.cc
new file mode 100644
index 0000000000..a2f6382f72
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.cc
@@ -0,0 +1,54 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/strings/internal/cord_rep_crc.h"
+
+#include <cassert>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/strings/internal/cord_internal.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+CordRepCrc* CordRepCrc::New(CordRep* child, uint32_t crc) {
+ assert(child != nullptr);
+ if (child->IsCrc()) {
+ if (child->refcount.IsOne()) {
+ child->crc()->crc = crc;
+ return child->crc();
+ }
+ CordRep* old = child;
+ child = old->crc()->child;
+ CordRep::Ref(child);
+ CordRep::Unref(old);
+ }
+ auto* new_cordrep = new CordRepCrc;
+ new_cordrep->length = child->length;
+ new_cordrep->tag = cord_internal::CRC;
+ new_cordrep->child = child;
+ new_cordrep->crc = crc;
+ return new_cordrep;
+}
+
+void CordRepCrc::Destroy(CordRepCrc* node) {
+ CordRep::Unref(node->child);
+ delete node;
+}
+
+} // namespace cord_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.h
new file mode 100644
index 0000000000..15d22fa248
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_crc.h
@@ -0,0 +1,102 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef Y_ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
+#define Y_ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
+
+#include <cassert>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/strings/internal/cord_internal.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepCrc is a CordRep node intended only to appear at the top level of a
+// cord tree. It associates an "expected CRC" with the contained data, to allow
+// for easy passage of checksum data in Cord data flows.
+//
+// From Cord's perspective, the crc value has no semantics; any validation of
+// the contained checksum is the user's responsibility.
+struct CordRepCrc : public CordRep {
+ CordRep* child;
+ uint32_t crc;
+
+ // Consumes `child` and returns a CordRepCrc prefixed tree containing `child`.
+ // If the specified `child` is itself a CordRepCrc node, then this method
+ // either replaces the existing node, or directly updates the crc value in it
+ // depending on the node being shared or not, i.e.: refcount.IsOne().
+ // `child` must not be null. Never returns null.
+ static CordRepCrc* New(CordRep* child, uint32_t crc);
+
+ // Destroys (deletes) the provided node. `node` must not be null.
+ static void Destroy(CordRepCrc* node);
+};
+
+// Consumes `rep` and returns a CordRep* with any outer CordRepCrc wrapper
+// removed. This is usually a no-op (returning `rep`), but this will remove and
+// unref an outer CordRepCrc node.
+inline CordRep* RemoveCrcNode(CordRep* rep) {
+ assert(rep != nullptr);
+ if (Y_ABSL_PREDICT_FALSE(rep->IsCrc())) {
+ CordRep* child = rep->crc()->child;
+ if (rep->refcount.IsOne()) {
+ delete rep->crc();
+ } else {
+ CordRep::Ref(child);
+ CordRep::Unref(rep);
+ }
+ return child;
+ }
+ return rep;
+}
+
+// Returns `rep` if it is not a CordRepCrc node, or its child if it is.
+// Does not consume or create a reference on `rep` or the returned value.
+inline CordRep* SkipCrcNode(CordRep* rep) {
+ assert(rep != nullptr);
+ if (Y_ABSL_PREDICT_FALSE(rep->IsCrc())) {
+ return rep->crc()->child;
+ } else {
+ return rep;
+ }
+}
+
+inline const CordRep* SkipCrcNode(const CordRep* rep) {
+ assert(rep != nullptr);
+ if (Y_ABSL_PREDICT_FALSE(rep->IsCrc())) {
+ return rep->crc()->child;
+ } else {
+ return rep;
+ }
+}
+
+inline CordRepCrc* CordRep::crc() {
+ assert(IsCrc());
+ return static_cast<CordRepCrc*>(this);
+}
+
+inline const CordRepCrc* CordRep::crc() const {
+ assert(IsCrc());
+ return static_cast<const CordRepCrc*>(this);
+}
+
+} // namespace cord_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
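A minimal usage sketch for the new CRC wrapper, assuming `rep` is an owned CordRep* and `crc_value` is computed by the caller; both names are illustrative, not part of the patch:
  // Attach an expected CRC (consumes `rep`, returns the wrapper).
  CordRep* with_crc = CordRepCrc::New(rep, crc_value);
  // Peek at the payload without dropping the checksum node.
  const CordRep* payload = SkipCrcNode(with_crc);
  // Strip and unref the checksum wrapper once it is no longer wanted.
  CordRep* without_crc = RemoveCrcNode(with_crc);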
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
index bcd5bf428e..b5ac831903 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
@@ -20,6 +20,8 @@
#include <cstdint>
#include <memory>
+#include "y_absl/base/config.h"
+#include "y_absl/base/macros.h"
#include "y_absl/strings/internal/cord_internal.h"
namespace y_absl {
@@ -42,23 +44,45 @@ static constexpr size_t kMinFlatSize = 32;
static constexpr size_t kMaxFlatSize = 4096;
static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
+static constexpr size_t kMaxLargeFlatSize = 256 * 1024;
+static constexpr size_t kMaxLargeFlatLength = kMaxLargeFlatSize - kFlatOverhead;
+// kTagBase should make the Size <--> Tag computation resilient
+// against changes to the value of FLAT when we add a new tag.
+static constexpr uint8_t kTagBase = FLAT - 4;
+
+// Converts the provided rounded size to the corresponding tag
constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
- return static_cast<uint8_t>((size <= 1024) ? size / 8 + 1
- : 129 + size / 32 - 1024 / 32);
+ return static_cast<uint8_t>(size <= 512 ? kTagBase + size / 8
+ : size <= 8192
+ ? kTagBase + 512 / 8 + size / 64 - 512 / 64
+ : kTagBase + 512 / 8 + ((8192 - 512) / 64) +
+ size / 4096 - 8192 / 4096);
+}
+
+// Converts the provided tag to the corresponding allocated size
+constexpr size_t TagToAllocatedSize(uint8_t tag) {
+ return (tag <= kTagBase + 512 / 8) ? tag * 8 - kTagBase * 8
+ : (tag <= kTagBase + (512 / 8) + ((8192 - 512) / 64))
+ ? 512 + tag * 64 - kTagBase * 64 - 512 / 8 * 64
+ : 8192 + tag * 4096 - kTagBase * 4096 -
+ ((512 / 8) + ((8192 - 512) / 64)) * 4096;
}
-static_assert(kMinFlatSize / 8 + 1 >= FLAT, "");
-static_assert(AllocatedSizeToTagUnchecked(kMaxFlatSize) <= MAX_FLAT_TAG, "");
+static_assert(AllocatedSizeToTagUnchecked(kMinFlatSize) == FLAT, "");
+static_assert(AllocatedSizeToTagUnchecked(kMaxLargeFlatSize) == MAX_FLAT_TAG,
+ "");
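The mapping now uses three bands: 8-byte steps up to 512 bytes, 64-byte steps up to 8192 bytes, and 4096-byte steps up to kMaxLargeFlatSize. Two illustrative compile-time checks, relying only on the constexpr helpers above; not part of the patch:
  static_assert(TagToAllocatedSize(AllocatedSizeToTagUnchecked(4096)) == 4096,
                "tag-aligned sizes must round-trip exactly");
  static_assert(TagToAllocatedSize(AllocatedSizeToTagUnchecked(64 * 1024)) ==
                    64 * 1024,
                "tag-aligned sizes must round-trip exactly");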
-// Helper functions for rounded div, and rounding to exact sizes.
-constexpr size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; }
-constexpr size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; }
+// RoundUp logically performs `((n + m - 1) / m) * m` to round up to the nearest
+// multiple of `m`, optimized for the invariant that `m` is a power of 2.
+constexpr size_t RoundUp(size_t n, size_t m) {
+ return (n + m - 1) & (0 - m);
+}
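The new RoundUp relies on `m` being a power of two: adding m - 1 and masking with the negated m clears the low bits. Two sketch checks, not part of the patch:
  static_assert(RoundUp(513, 64) == 576, "rounds up to the next 64-byte step");
  static_assert(RoundUp(4096, 4096) == 4096, "aligned sizes are unchanged");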
// Returns the size to the nearest equal or larger value that can be
// expressed exactly as a tag value.
inline size_t RoundUpForTag(size_t size) {
- return RoundUp(size, (size <= 1024) ? 8 : 32);
+ return RoundUp(size, (size <= 512) ? 8 : (size <= 8192 ? 64 : 4096));
}
// Converts the allocated size to a tag, rounding down if the size
@@ -71,26 +95,26 @@ inline uint8_t AllocatedSizeToTag(size_t size) {
return tag;
}
-// Converts the provided tag to the corresponding allocated size
-constexpr size_t TagToAllocatedSize(uint8_t tag) {
- return (tag <= 129) ? ((tag - 1) * 8) : (1024 + (tag - 129) * 32);
-}
-
// Converts the provided tag to the corresponding available data length
constexpr size_t TagToLength(uint8_t tag) {
return TagToAllocatedSize(tag) - kFlatOverhead;
}
 // Enforce that kMaxLargeFlatSize maps to a well-known exact tag value.
-static_assert(TagToAllocatedSize(225) == kMaxFlatSize, "Bad tag logic");
+static_assert(TagToAllocatedSize(MAX_FLAT_TAG) == kMaxLargeFlatSize,
+ "Bad tag logic");
struct CordRepFlat : public CordRep {
+ // Tag for explicit 'large flat' allocation
+ struct Large {};
+
// Creates a new flat node.
- static CordRepFlat* New(size_t len) {
+ template <size_t max_flat_size, typename... Args>
+ static CordRepFlat* NewImpl(size_t len, Args... args Y_ABSL_ATTRIBUTE_UNUSED) {
if (len <= kMinFlatLength) {
len = kMinFlatLength;
- } else if (len > kMaxFlatLength) {
- len = kMaxFlatLength;
+ } else if (len > max_flat_size - kFlatOverhead) {
+ len = max_flat_size - kFlatOverhead;
}
// Round size up so it matches a size we can exactly express in a tag.
@@ -101,6 +125,12 @@ struct CordRepFlat : public CordRep {
return rep;
}
+ static CordRepFlat* New(size_t len) { return NewImpl<kMaxFlatSize>(len); }
+
+ static CordRepFlat* New(Large, size_t len) {
+ return NewImpl<kMaxLargeFlatSize>(len);
+ }
+
// Deletes a CordRepFlat instance created previously through a call to New().
// Flat CordReps are allocated and constructed with raw ::operator new and
// placement new, and must be destructed and deallocated accordingly.
@@ -117,6 +147,17 @@ struct CordRepFlat : public CordRep {
#endif
}
+ // Creates a CordRepFlat containing `data`, with optional extra capacity of
+ // up to `extra` bytes. Requires that `data.size()` is at most
+ // kMaxFlatLength.
+ static CordRepFlat* Create(y_absl::string_view data, size_t extra = 0) {
+ assert(data.size() <= kMaxFlatLength);
+ CordRepFlat* flat = New(data.size() + (std::min)(extra, kMaxFlatLength));
+ memcpy(flat->Data(), data.data(), data.size());
+ flat->length = data.size();
+ return flat;
+ }
+
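A short sketch of Create() in use; the literal and the `extra` value are illustrative only, not part of the patch:
  // Builds a flat holding "hello" with room for at least 16 more bytes
  // (subject to the usual size clamping), then releases it.
  CordRepFlat* flat = CordRepFlat::Create("hello", /*extra=*/16);
  size_t spare = flat->Capacity() - flat->length;  // remaining append room
  CordRepFlat::Delete(flat);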
// Returns a pointer to the data inside this flat rep.
char* Data() { return reinterpret_cast<char*>(storage); }
const char* Data() const { return reinterpret_cast<const char*>(storage); }
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc
index d961db7192..f05aa5ffa7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.cc
@@ -129,7 +129,9 @@ class CordRepRing::Filler {
index_type pos_;
};
-constexpr size_t CordRepRing::kMaxCapacity; // NOLINT: needed for c++11
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordRepRing::kMaxCapacity;
+#endif
bool CordRepRing::IsValid(std::ostream& output) const {
if (capacity_ == 0) {
@@ -277,7 +279,7 @@ CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
// Get current number of entries, and check for max capacity.
size_t entries = rep->entries();
- if (!rep->refcount.IsMutable()) {
+ if (!rep->refcount.IsOne()) {
return Copy(rep, rep->head(), rep->tail(), extra);
} else if (entries + extra > rep->capacity()) {
const size_t min_grow = rep->capacity() + rep->capacity() / 2;
@@ -292,10 +294,10 @@ CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
}
Span<char> CordRepRing::GetAppendBuffer(size_t size) {
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
index_type back = retreat(tail_);
CordRep* child = entry_child(back);
- if (child->tag >= FLAT && child->refcount.IsMutable()) {
+ if (child->tag >= FLAT && child->refcount.IsOne()) {
size_t capacity = child->flat()->Capacity();
pos_type end_pos = entry_end_pos(back);
size_t data_offset = entry_data_offset(back);
@@ -312,10 +314,10 @@ Span<char> CordRepRing::GetAppendBuffer(size_t size) {
}
Span<char> CordRepRing::GetPrependBuffer(size_t size) {
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
CordRep* child = entry_child(head_);
size_t data_offset = entry_data_offset(head_);
- if (data_offset && child->refcount.IsMutable() && child->tag >= FLAT) {
+ if (data_offset && child->refcount.IsOne() && child->tag >= FLAT) {
size_t n = (std::min)(data_offset, size);
this->length += n;
begin_pos_ -= n;
@@ -504,7 +506,7 @@ CordRepRing* CordRepRing::Prepend(CordRepRing* rep, CordRep* child) {
CordRepRing* CordRepRing::Append(CordRepRing* rep, y_absl::string_view data,
size_t extra) {
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
Span<char> avail = rep->GetAppendBuffer(data.length());
if (!avail.empty()) {
memcpy(avail.data(), data.data(), avail.length());
@@ -538,7 +540,7 @@ CordRepRing* CordRepRing::Append(CordRepRing* rep, y_absl::string_view data,
CordRepRing* CordRepRing::Prepend(CordRepRing* rep, y_absl::string_view data,
size_t extra) {
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
Span<char> avail = rep->GetPrependBuffer(data.length());
if (!avail.empty()) {
const char* tail = data.data() + data.length() - avail.length();
@@ -678,7 +680,7 @@ CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
Position tail = rep->FindTail(head.index, offset + len);
const size_t new_entries = rep->entries(head.index, tail.index);
- if (rep->refcount.IsMutable() && extra <= (rep->capacity() - new_entries)) {
+ if (rep->refcount.IsOne() && extra <= (rep->capacity() - new_entries)) {
// We adopt a privately owned rep and no extra entries needed.
if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
@@ -715,7 +717,7 @@ CordRepRing* CordRepRing::RemovePrefix(CordRepRing* rep, size_t len,
}
Position head = rep->Find(len);
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
rep->head_ = head.index;
} else {
@@ -745,7 +747,7 @@ CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
}
Position tail = rep->FindTail(rep->length - len);
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
// We adopt a privately owned rep, scrub.
if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
rep->tail_ = tail.index;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
index ae35341f2a..bc257c9be2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
@@ -20,6 +20,7 @@
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/strings/internal/cord_internal.h"
#include "y_absl/strings/internal/cord_rep_btree.h"
+#include "y_absl/strings/internal/cord_rep_crc.h"
#include "y_absl/strings/internal/cord_rep_ring.h"
#include "y_absl/strings/internal/cordz_handle.h"
#include "y_absl/strings/internal/cordz_statistics.h"
@@ -33,7 +34,9 @@ namespace cord_internal {
using ::y_absl::base_internal::SpinLockHolder;
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int CordzInfo::kMaxStackDepth;
+#endif
Y_ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{y_absl::kConstInit};
@@ -81,6 +84,14 @@ class CordRepAnalyzer {
size_t refcount = rep->refcount.Get();
RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};
+ // Process the top level CRC node, if present.
+ if (repref.rep->tag == CRC) {
+ statistics_.node_count++;
+ statistics_.node_counts.crc++;
+ memory_usage_.Add(sizeof(CordRepCrc), repref.refcount);
+ repref = repref.Child(repref.rep->crc()->child);
+ }
+
// Process all top level linear nodes (substrings and flats).
repref = CountLinearReps(repref, memory_usage_);
@@ -89,8 +100,6 @@ class CordRepAnalyzer {
AnalyzeRing(repref);
} else if (repref.rep->tag == BTREE) {
AnalyzeBtree(repref);
- } else if (repref.rep->tag == CONCAT) {
- AnalyzeConcat(repref);
} else {
 // We should have either a btree or ring node if not null.
assert(false);
@@ -132,14 +141,6 @@ class CordRepAnalyzer {
}
};
- // Returns `rr` if `rr.rep` is not null and a CONCAT type.
- // Asserts that `rr.rep` is a concat node or null.
- static RepRef AssertConcat(RepRef repref) {
- const CordRep* rep = repref.rep;
- assert(rep == nullptr || rep->tag == CONCAT);
- return (rep != nullptr && rep->tag == CONCAT) ? repref : RepRef{nullptr, 0};
- }
-
 // Counts a flat of the provided allocated size
void CountFlat(size_t size) {
statistics_.node_count++;
@@ -192,34 +193,6 @@ class CordRepAnalyzer {
return rep;
}
- // Analyzes the provided concat node in a flattened recursive way.
- void AnalyzeConcat(RepRef rep) {
- y_absl::InlinedVector<RepRef, 47> pending;
-
- while (rep.rep != nullptr) {
- const CordRepConcat* concat = rep.rep->concat();
- RepRef left = rep.Child(concat->left);
- RepRef right = rep.Child(concat->right);
-
- statistics_.node_count++;
- statistics_.node_counts.concat++;
- memory_usage_.Add(sizeof(CordRepConcat), rep.refcount);
-
- right = AssertConcat(CountLinearReps(right, memory_usage_));
- rep = AssertConcat(CountLinearReps(left, memory_usage_));
- if (rep.rep != nullptr) {
- if (right.rep != nullptr) {
- pending.push_back(right);
- }
- } else if (right.rep != nullptr) {
- rep = right;
- } else if (!pending.empty()) {
- rep = pending.back();
- pending.pop_back();
- }
- }
- }
-
// Analyzes the provided ring.
void AnalyzeRing(RepRef rep) {
statistics_.node_count++;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h
index 29ca3d1fcc..01194adeed 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_statistics.h
@@ -41,6 +41,7 @@ struct CordzStatistics {
size_t concat = 0; // #concat reps
size_t ring = 0; // #ring buffer reps
size_t btree = 0; // #btree reps
+ size_t crc = 0; // #crc reps
};
// The size of the cord in bytes. This matches the result of Cord::size().
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h
index bb9fae0e6f..0ff3eae6e8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_update_tracker.h
@@ -39,8 +39,8 @@ class CordzUpdateTracker {
// Tracked update methods.
enum MethodIdentifier {
kUnknown,
- kAppendBuffer,
kAppendCord,
+ kAppendCordBuffer,
kAppendExternalMemory,
kAppendString,
kAssignCord,
@@ -50,16 +50,18 @@ class CordzUpdateTracker {
kConstructorString,
kCordReader,
kFlatten,
+ kGetAppendBuffer,
kGetAppendRegion,
kMakeCordFromExternal,
kMoveAppendCord,
kMoveAssignCord,
kMovePrependCord,
- kPrependBuffer,
kPrependCord,
+ kPrependCordBuffer,
kPrependString,
kRemovePrefix,
kRemoveSuffix,
+ kSetExpectedChecksum,
kSubCord,
// kNumMethods defines the number of entries: must be the last entry.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
index 1105bbe1f9..9e7c766565 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
@@ -21,7 +21,7 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace strings_internal {
-const char kBase64Chars[] =
+Y_ABSL_CONST_INIT const char kBase64Chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
@@ -102,8 +102,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
}
}
// To save time, we didn't update szdest or szsrc in the loop. So do it now.
- szdest = limit_dest - cur_dest;
- szsrc = limit_src - cur_src;
+ szdest = static_cast<size_t>(limit_dest - cur_dest);
+ szsrc = static_cast<size_t>(limit_src - cur_src);
/* now deal with the tail (<=3 bytes) */
switch (szsrc) {
@@ -154,7 +154,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
// the loop because the loop above always reads 4 bytes, and the fourth
// byte is past the end of the input.
if (szdest < 4) return 0;
- uint32_t in = (cur_src[0] << 16) + y_absl::big_endian::Load16(cur_src + 1);
+ uint32_t in =
+ (uint32_t{cur_src[0]} << 16) + y_absl::big_endian::Load16(cur_src + 1);
cur_dest[0] = base64[in >> 18];
in &= 0x3FFFF;
cur_dest[1] = base64[in >> 12];
@@ -172,7 +173,7 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
Y_ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
break;
}
- return (cur_dest - dest);
+ return static_cast<size_t>(cur_dest - dest);
}
} // namespace strings_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc
index c850f8abb1..2c23a18e12 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/ostringstream.cc
@@ -27,7 +27,7 @@ OStringStream::Buf::int_type OStringStream::overflow(int c) {
std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
assert(s_);
- s_->append(s, n);
+ s_->append(s, static_cast<size_t>(n));
return n;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
index 5f4399c9da..0d503208f0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
@@ -320,7 +320,7 @@ bool ConvertIntArg(T v, const FormatConversionSpecImpl conv,
return ConvertFloatImpl(static_cast<double>(v), conv, sink);
default:
- Y_ABSL_INTERNAL_ASSUME(false);
+ Y_ABSL_ASSUME(false);
}
if (conv.is_basic()) {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
index 67ac59122b..f6621166e5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
@@ -145,7 +145,7 @@ StringConvertResult FormatConvertImpl(const AbslCord& value,
size_t space_remaining = 0;
int width = conv.width();
- if (width >= 0) space_remaining = width;
+ if (width >= 0) space_remaining = static_cast<size_t>(width);
size_t to_write = value.size();
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
index 4f254951fd..3688b503af 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
@@ -25,6 +25,7 @@
#include "y_absl/strings/internal/str_format/checker.h"
#include "y_absl/strings/internal/str_format/parser.h"
#include "y_absl/types/span.h"
+#include "y_absl/utility/utility.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
@@ -87,6 +88,36 @@ class FormatSpecTemplate
: public MakeDependent<UntypedFormatSpec, Args...>::type {
using Base = typename MakeDependent<UntypedFormatSpec, Args...>::type;
+ template <bool res>
+ struct ErrorMaker {
+ constexpr bool operator()(int) const { return res; }
+ };
+
+ template <int i, int j>
+ static constexpr bool CheckArity(ErrorMaker<true> SpecifierCount = {},
+ ErrorMaker<i == j> ParametersPassed = {}) {
+ static_assert(SpecifierCount(i) == ParametersPassed(j),
+ "Number of arguments passed must match the number of "
+ "conversion specifiers.");
+ return true;
+ }
+
+ template <FormatConversionCharSet specified, FormatConversionCharSet passed,
+ int arg>
+ static constexpr bool CheckMatch(
+ ErrorMaker<Contains(specified, passed)> MismatchedArgumentNumber = {}) {
+ static_assert(MismatchedArgumentNumber(arg),
+ "Passed argument must match specified format.");
+ return true;
+ }
+
+ template <FormatConversionCharSet... C, size_t... I>
+ static bool CheckMatches(y_absl::index_sequence<I...>) {
+ bool res[] = {true, CheckMatch<Args, C, I + 1>()...};
+ (void)res;
+ return true;
+ }
+
public:
#ifdef Y_ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
@@ -112,7 +143,8 @@ class FormatSpecTemplate
template <typename T = void>
FormatSpecTemplate(string_view s) // NOLINT
__attribute__((enable_if(str_format_internal::EnsureConstexpr(s),
- "constexpr trap"))) {
+ "constexpr trap")))
+ : Base("to avoid noise in the compiler error") {
static_assert(sizeof(T*) == 0,
"Format specified does not match the arguments passed.");
}
@@ -133,13 +165,12 @@ class FormatSpecTemplate
#endif // Y_ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
- template <
- FormatConversionCharSet... C,
- typename = typename std::enable_if<sizeof...(C) == sizeof...(Args)>::type,
- typename = typename std::enable_if<AllOf(Contains(Args,
- C)...)>::type>
+ template <FormatConversionCharSet... C>
FormatSpecTemplate(const ExtendedParsedFormat<C...>& pc) // NOLINT
- : Base(&pc) {}
+ : Base(&pc) {
+ CheckArity<sizeof...(C), sizeof...(Args)>();
+ CheckMatches<C...>(y_absl::make_index_sequence<sizeof...(C)>{});
+ }
};
class Streamable {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h
index a10724d870..e262c6ba87 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/checker.h
@@ -22,9 +22,14 @@
// Compile time check support for entry points.
#ifndef Y_ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
-#if Y_ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+// We disable format checker under vscode intellisense compilation.
+// See https://github.com/microsoft/vscode-cpptools/issues/3683 for
+// more details.
+#if Y_ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && \
+ !defined(__INTELLISENSE__)
#define Y_ABSL_INTERNAL_ENABLE_FORMAT_CHECKER 1
-#endif // Y_ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+#endif // Y_ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) &&
+ // !defined(__INTELLISENSE__)
#endif // Y_ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
namespace y_absl {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc
index 24e0ec7a13..db6182f35d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.cc
@@ -33,6 +33,8 @@ TString FlagsToString(Flags v) {
return s;
}
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
#define Y_ABSL_INTERNAL_X_VAL(id) \
constexpr y_absl::FormatConversionChar FormatConversionCharInternal::id;
Y_ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(Y_ABSL_INTERNAL_X_VAL, )
@@ -45,17 +47,14 @@ constexpr y_absl::FormatConversionChar FormatConversionCharInternal::kNone;
Y_ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(Y_ABSL_INTERNAL_CHAR_SET_CASE, )
#undef Y_ABSL_INTERNAL_CHAR_SET_CASE
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer;
+#endif // Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
bool FormatSinkImpl::PutPaddedString(string_view value, int width,
int precision, bool left) {
size_t space_remaining = 0;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
index 836bd1a10c..cc1a5d6bad 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
@@ -19,6 +19,7 @@
#include <limits.h>
#include <cstddef>
+#include <cstdint>
#include <cstring>
#include <ostream>
@@ -70,7 +71,7 @@ class FormatSinkImpl {
~FormatSinkImpl() { Flush(); }
void Flush() {
- raw_.Write(string_view(buf_, pos_ - buf_));
+ raw_.Write(string_view(buf_, static_cast<size_t>(pos_ - buf_)));
pos_ = buf_;
}
@@ -120,7 +121,9 @@ class FormatSinkImpl {
}
private:
- size_t Avail() const { return buf_ + sizeof(buf_) - pos_; }
+ size_t Avail() const {
+ return static_cast<size_t>(buf_ + sizeof(buf_) - pos_);
+ }
FormatRawSinkImpl raw_;
size_t size_ = 0;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h
index 7732b5e5e1..e6bebabf9c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/output.h
@@ -22,6 +22,7 @@
#define Y_ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_
#include <cstdio>
+#include <ios>
#include <ostream>
#include <util/generic/string.h>
@@ -71,7 +72,7 @@ inline void AbslFormatFlush(TString* out, string_view s) {
out->append(s.data(), s.size());
}
inline void AbslFormatFlush(std::ostream* out, string_view s) {
- out->write(s.data(), s.size());
+ out->write(s.data(), static_cast<std::streamsize>(s.size()));
}
inline void AbslFormatFlush(FILERawSink* sink, string_view v) {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h
index 68b6654be9..16bc0b12be 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/parser.h
@@ -151,7 +151,8 @@ bool ParseFormatString(string_view src, Consumer consumer) {
const char* p = src.data();
const char* const end = p + src.size();
while (p != end) {
- const char* percent = static_cast<const char*>(memchr(p, '%', end - p));
+ const char* percent =
+ static_cast<const char*>(memchr(p, '%', static_cast<size_t>(end - p)));
if (!percent) {
// We found the last substring.
return consumer.Append(string_view(p, end - p));
@@ -242,7 +243,8 @@ class ParsedFormatBase {
string_view text(base, 0);
for (const auto& item : items_) {
const char* const end = text.data() + text.size();
- text = string_view(end, (base + item.text_end) - end);
+ text =
+ string_view(end, static_cast<size_t>((base + item.text_end) - end));
if (item.is_conversion) {
if (!consumer.ConvertOne(item.conv, text)) return false;
} else {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h
index 7313192abf..d4ef8a2c86 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_join_internal.h
@@ -229,10 +229,11 @@ TString JoinAlgorithm(Iterator start, Iterator end, y_absl::string_view s,
TString result;
if (start != end) {
// Sums size
- size_t result_size = start->size();
+ auto&& start_value = *start;
+ size_t result_size = start_value.size();
for (Iterator it = start; ++it != end;) {
result_size += s.size();
- result_size += it->size();
+ result_size += (*it).size();
}
if (result_size > 0) {
@@ -240,13 +241,15 @@ TString JoinAlgorithm(Iterator start, Iterator end, y_absl::string_view s,
// Joins strings
char* result_buf = &*result.begin();
- memcpy(result_buf, start->data(), start->size());
- result_buf += start->size();
+
+ memcpy(result_buf, start_value.data(), start_value.size());
+ result_buf += start_value.size();
for (Iterator it = start; ++it != end;) {
memcpy(result_buf, s.data(), s.size());
result_buf += s.size();
- memcpy(result_buf, it->data(), it->size());
- result_buf += it->size();
+ auto&& value = *it;
+ memcpy(result_buf, value.data(), value.size());
+ result_buf += value.size();
}
}
}
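
Binding `*start` and `*it` to an `auto&&` local matters for iterators whose operator* returns a value rather than a reference (proxy or transforming iterators): `it->data()` followed by `it->size()` would otherwise touch two different temporaries, and `operator->` may not exist at all. With `auto&& value = *it;` the temporary is lifetime-extended and both the data() and size() calls observe the same object. A small illustration with hypothetical types (ByValueIter, TotalSize), not taken from the patch:

    #include <cstddef>
    #include <string>

    // A proxy iterator whose operator* returns a temporary std::string by value.
    struct ByValueIter {
      const int* p;
      std::string operator*() const { return std::to_string(*p); }
      ByValueIter& operator++() { ++p; return *this; }
      bool operator!=(ByValueIter o) const { return p != o.p; }
    };

    std::size_t TotalSize(ByValueIter first, ByValueIter last) {
      std::size_t n = 0;
      for (; first != last; ++first) {
        auto&& value = *first;  // one temporary, lifetime-extended for the loop body
        n += value.size();      // every use sees the same object
      }
      return n;
    }
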
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h
index f46e814eff..d325f5e2a9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/string_constant.h
@@ -35,17 +35,25 @@ namespace strings_internal {
// below.
template <typename T>
struct StringConstant {
+ private:
+ static constexpr bool TryConstexprEval(y_absl::string_view view) {
+ return view.empty() || 2 * view[0] != 1;
+ }
+
+ public:
static constexpr y_absl::string_view value = T{}();
constexpr y_absl::string_view operator()() const { return value; }
// Check to be sure `view` points to constant data.
// Otherwise, it can't be constant evaluated.
- static_assert(value.empty() || 2 * value[0] != 1,
+ static_assert(TryConstexprEval(value),
"The input string_view must point to constant data.");
};
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T>
-constexpr y_absl::string_view StringConstant<T>::value; // NOLINT
+constexpr y_absl::string_view StringConstant<T>::value;
+#endif
// Factory function for `StringConstant` instances.
// It supports callables that have a constexpr default constructor and a
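
Moving the probe into TryConstexprEval gives the trick a name: reading view[0] is only a constant expression when the callable returned a string_view into compile-time-constant characters, so a non-constant input makes the static_assert fail to evaluate rather than silently pass. A self-contained sketch of the same idea using std::string_view and hypothetical names (Greeting, LiteralConstant):

    #include <string_view>

    struct Greeting {
      constexpr std::string_view operator()() const { return "hello"; }
    };

    template <typename T>
    struct LiteralConstant {
      static constexpr std::string_view value = T{}();
      // Always true for real data; it simply fails to be a constant expression
      // when `value` does not point at constant characters.
      static_assert(value.empty() || 2 * value[0] != 1,
                    "The input string_view must point to constant data.");
    };

    static_assert(LiteralConstant<Greeting>::value.size() == 5);  // instantiates the check
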
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc
index bccb5bf81e..c49e650158 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/utf8.cc
@@ -25,25 +25,25 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
*buffer = static_cast<char>(utf8_char);
return 1;
} else if (utf8_char <= 0x7FF) {
- buffer[1] = 0x80 | (utf8_char & 0x3F);
+ buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[0] = 0xC0 | utf8_char;
+ buffer[0] = static_cast<char>(0xC0 | utf8_char);
return 2;
} else if (utf8_char <= 0xFFFF) {
- buffer[2] = 0x80 | (utf8_char & 0x3F);
+ buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[1] = 0x80 | (utf8_char & 0x3F);
+ buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[0] = 0xE0 | utf8_char;
+ buffer[0] = static_cast<char>(0xE0 | utf8_char);
return 3;
} else {
- buffer[3] = 0x80 | (utf8_char & 0x3F);
+ buffer[3] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[2] = 0x80 | (utf8_char & 0x3F);
+ buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[1] = 0x80 | (utf8_char & 0x3F);
+ buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[0] = 0xF0 | utf8_char;
+ buffer[0] = static_cast<char>(0xF0 | utf8_char);
return 4;
}
}
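
The casts in EncodeUTF8Char change nothing about the emitted bytes; they only make the narrowing from the char32_t arithmetic down to char explicit. As a worked check of the two-byte branch, U+00E9 ('é') must come out as C3 A9 (Encode2Byte below is a hypothetical standalone copy of that branch, not the library function):

    #include <cassert>
    #include <cstddef>

    std::size_t Encode2Byte(char* buffer, char32_t cp) {
      buffer[1] = static_cast<char>(0x80 | (cp & 0x3F));  // low 6 bits -> continuation byte
      cp >>= 6;
      buffer[0] = static_cast<char>(0xC0 | cp);           // remaining bits -> lead byte
      return 2;
    }

    int main() {
      char buf[2];
      Encode2Byte(buf, 0xE9);
      assert(static_cast<unsigned char>(buf[0]) == 0xC3);
      assert(static_cast<unsigned char>(buf[1]) == 0xA9);
    }
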
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
index fec739ec89..e2c8ddf329 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
@@ -757,8 +757,8 @@ struct LookupTables {
//
// uint128& operator/=(uint128) is not constexpr, so hardcode the resulting
// array to avoid a static initializer.
-template<>
-const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
+template <>
+Y_ABSL_CONST_INIT const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
0,
0,
MakeUint128(9223372036854775807u, 18446744073709551615u),
@@ -809,8 +809,8 @@ const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
//
// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
// to avoid a static initializer.
-template<>
-const int128 LookupTables<int128>::kVmaxOverBase[] = {
+template <>
+Y_ABSL_CONST_INIT const int128 LookupTables<int128>::kVmaxOverBase[] = {
0,
0,
MakeInt128(4611686018427387903, 18446744073709551615u),
@@ -862,8 +862,8 @@ const int128 LookupTables<int128>::kVmaxOverBase[] = {
//
// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
// to avoid a static initializer.
-template<>
-const int128 LookupTables<int128>::kVminOverBase[] = {
+template <>
+Y_ABSL_CONST_INIT const int128 LookupTables<int128>::kVminOverBase[] = {
0,
0,
MakeInt128(-4611686018427387904, 0u),
@@ -904,11 +904,11 @@ const int128 LookupTables<int128>::kVminOverBase[] = {
};
template <typename IntType>
-const IntType LookupTables<IntType>::kVmaxOverBase[] =
+Y_ABSL_CONST_INIT const IntType LookupTables<IntType>::kVmaxOverBase[] =
X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::max());
template <typename IntType>
-const IntType LookupTables<IntType>::kVminOverBase[] =
+Y_ABSL_CONST_INIT const IntType LookupTables<IntType>::kVminOverBase[] =
X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::min());
#undef X_OVER_BASE_INITIALIZER
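
Y_ABSL_CONST_INIT (this fork's spelling of ABSL_CONST_INIT, which in upstream Abseil expands to an attribute such as [[clang::require_constant_initialization]] where available) turns the comment's "avoid a static initializer" intent into a compile-time guarantee: if the initializer stops being a constant expression, the build fails instead of silently introducing dynamic initialization and its ordering hazards. C++20's constinit expresses the same guarantee portably; a hedged sketch with hypothetical names:

    #include <cstdint>

    constexpr std::uint64_t Square(std::uint64_t x) { return x * x; }

    // Enforced constant initialization: no static initializer is emitted.
    constinit std::uint64_t kSquares[] = {Square(0), Square(1), Square(2), Square(3)};

    // constinit std::uint64_t kBad = std::rand();  // would not compile: not a constant
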
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
index 3f19d1dacd..b16ac44d0c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
@@ -23,12 +23,12 @@
#ifndef Y_ABSL_STRINGS_NUMBERS_H_
#define Y_ABSL_STRINGS_NUMBERS_H_
-#ifdef __SSE4_2__
+#ifdef __SSSE3__
+#include <tmmintrin.h>
+#endif
+
#ifdef _MSC_VER
#include <intrin.h>
-#else
-#include <x86intrin.h>
-#endif
#endif
#include <cstddef>
@@ -40,14 +40,7 @@
#include <type_traits>
#include "y_absl/base/config.h"
-#ifdef __SSE4_2__
-// TODO(jorg): Remove this when we figure out the right way
-// to swap bytes on SSE 4.2 that works with the compilers
-// we claim to support. Also, add tests for the compiler
-// that doesn't support the Intel _bswap64 intrinsic but
-// does support all the SSE 4.2 intrinsics
#include "y_absl/base/internal/endian.h"
-#endif
#include "y_absl/base/macros.h"
#include "y_absl/base/port.h"
#include "y_absl/numeric/bits.h"
@@ -185,16 +178,19 @@ char* FastIntToBuffer(int_type i, char* buffer) {
// TODO(jorg): This signed-ness check is used because it works correctly
// with enums, and it also serves to check that int_type is not a pointer.
// If one day something like std::is_signed<enum E> works, switch to it.
- if (static_cast<int_type>(1) - 2 < 0) { // Signed
- if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit
+ // These conditions are constexpr bools to suppress MSVC warning C4127.
+ constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+ constexpr bool kUse64Bit = sizeof(i) > 32 / 8;
+ if (kIsSigned) {
+ if (kUse64Bit) {
return FastIntToBuffer(static_cast<int64_t>(i), buffer);
- } else { // 32-bit or less
+ } else {
return FastIntToBuffer(static_cast<int32_t>(i), buffer);
}
- } else { // Unsigned
- if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit
+ } else {
+ if (kUse64Bit) {
return FastIntToBuffer(static_cast<uint64_t>(i), buffer);
- } else { // 32-bit or less
+ } else {
return FastIntToBuffer(static_cast<uint32_t>(i), buffer);
}
}
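
MSVC's C4127 ("conditional expression is constant") fires when a compile-time constant is used directly as an if condition; naming the condition as a constexpr bool keeps the dispatch identical while silencing the warning. The patch keeps the `static_cast<int_type>(1) - 2 < 0` arithmetic so that enum types still work, per its own TODO; the sketch below uses std::is_signed only for brevity and shows the C++17 `if constexpr` spelling of the same dispatch (WidthName is a hypothetical function):

    #include <type_traits>

    template <typename IntType>
    const char* WidthName(IntType) {
      // Named constants avoid MSVC C4127 on otherwise-constant conditions.
      constexpr bool kIsSigned = std::is_signed<IntType>::value;
      constexpr bool kUse64Bit = sizeof(IntType) > 4;
      if constexpr (kIsSigned) {
        return kUse64Bit ? "int64" : "int32";
      } else {
        return kUse64Bit ? "uint64" : "uint32";
      }
    }
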
@@ -213,22 +209,25 @@ Y_ABSL_MUST_USE_RESULT bool safe_strtoi_base(y_absl::string_view s, int_type* ou
// TODO(jorg): This signed-ness check is used because it works correctly
// with enums, and it also serves to check that int_type is not a pointer.
// If one day something like std::is_signed<enum E> works, switch to it.
- if (static_cast<int_type>(1) - 2 < 0) { // Signed
- if (sizeof(*out) == 64 / 8) { // 64-bit
+ // These conditions are constexpr bools to suppress MSVC warning C4127.
+ constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+ constexpr bool kUse64Bit = sizeof(*out) == 64 / 8;
+ if (kIsSigned) {
+ if (kUse64Bit) {
int64_t val;
parsed = numbers_internal::safe_strto64_base(s, &val, base);
*out = static_cast<int_type>(val);
- } else { // 32-bit
+ } else {
int32_t val;
parsed = numbers_internal::safe_strto32_base(s, &val, base);
*out = static_cast<int_type>(val);
}
- } else { // Unsigned
- if (sizeof(*out) == 64 / 8) { // 64-bit
+ } else {
+ if (kUse64Bit) {
uint64_t val;
parsed = numbers_internal::safe_strtou64_base(s, &val, base);
*out = static_cast<int_type>(val);
- } else { // 32-bit
+ } else {
uint32_t val;
parsed = numbers_internal::safe_strtou32_base(s, &val, base);
*out = static_cast<int_type>(val);
@@ -244,7 +243,7 @@ Y_ABSL_MUST_USE_RESULT bool safe_strtoi_base(y_absl::string_view s, int_type* ou
// Returns the number of non-pad digits of the output (it can never be zero
// since 0 has one digit).
inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
-#ifdef __SSE4_2__
+#ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
uint64_t be = y_absl::big_endian::FromHost64(val);
const auto kNibbleMask = _mm_set1_epi8(0xf);
const auto kHexDigits = _mm_setr_epi8('0', '1', '2', '3', '4', '5', '6', '7',
@@ -263,7 +262,7 @@ inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
}
#endif
// | 0x1 so that even 0 has 1 digit.
- return 16 - countl_zero(val | 0x1) / 4;
+ return 16 - static_cast<size_t>(countl_zero(val | 0x1) / 4);
}
} // namespace numbers_internal
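
Switching the guard from __SSE4_2__ to the SSSE3 detection macro matches what the vector path actually needs: its byte-table lookup (_mm_shuffle_epi8) is an SSSE3 instruction, and nothing in this hunk appears to require SSE4.2. The function's return value is just the count of significant hex digits derived from the leading-zero count; a scalar check of that formula (HexDigitCount is a hypothetical mirror of the non-SIMD branch, using C++20 std::countl_zero in place of y_absl::countl_zero):

    #include <bit>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    std::size_t HexDigitCount(std::uint64_t val) {
      // | 0x1 so that even 0 reports one digit, as in the patched code.
      return 16 - static_cast<std::size_t>(std::countl_zero(val | 0x1) / 4);
    }

    int main() {
      assert(HexDigitCount(0x0) == 1);
      assert(HexDigitCount(0xABC) == 3);            // "0000000000000abc": 3 significant digits
      assert(HexDigitCount(~std::uint64_t{0}) == 16);
    }
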
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
index 4f5e6e4df6..59ef206816 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
@@ -214,23 +214,29 @@ class AlphaNum {
// A bool ctor would also convert incoming pointers (bletch).
AlphaNum(int x) // NOLINT(runtime/explicit)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(unsigned int x) // NOLINT(runtime/explicit)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(unsigned long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(long long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(unsigned long long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(float f) // NOLINT(runtime/explicit)
: piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
@@ -245,7 +251,8 @@ class AlphaNum {
const strings_internal::AlphaNumBuffer<size>& buf)
: piece_(&buf.data[0], buf.size) {}
- AlphaNum(const char* c_str) : piece_(c_str) {} // NOLINT(runtime/explicit)
+ AlphaNum(const char* c_str) // NOLINT(runtime/explicit)
+ : piece_(NullSafeStringView(c_str)) {} // NOLINT(runtime/explicit)
AlphaNum(y_absl::string_view pc) : piece_(pc) {} // NOLINT(runtime/explicit)
template <typename Allocator>
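
Routing the `const char*` constructor through NullSafeStringView means a null pointer now contributes an empty piece instead of constructing a string_view from nullptr, which would run strlen on it (undefined behavior). A hedged re-implementation of the guard, not the library's helper:

    #include <string_view>

    constexpr std::string_view NullSafe(const char* p) {
      return p ? std::string_view(p) : std::string_view();
    }

    static_assert(NullSafe(nullptr).empty());
    static_assert(NullSafe("abc").size() == 3);

The practical effect is that a null C string passed to StrCat-style concatenation appends nothing rather than invoking undefined behavior.
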
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h
index 613934b466..f23fe9d102 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_join.h
@@ -72,21 +72,15 @@ Y_ABSL_NAMESPACE_BEGIN
// functions. You may provide your own Formatter to enable `y_absl::StrJoin()` to
// work with arbitrary types.
//
-// The following is an example of a custom Formatter that simply uses
-// `std::to_string()` to format an integer as a TString.
-//
-// struct MyFormatter {
-// void operator()(TString* out, int i) const {
-// out->append(std::to_string(i));
-// }
-// };
-//
-// You would use the above formatter by passing an instance of it as the final
-// argument to `y_absl::StrJoin()`:
-//
-// std::vector<int> v = {1, 2, 3, 4};
-// TString s = y_absl::StrJoin(v, "-", MyFormatter());
-// EXPECT_EQ("1-2-3-4", s);
+// The following is an example of a custom Formatter that uses
+// `y_absl::FormatDuration` to join a list of `y_absl::Duration`s.
+//
+// std::vector<y_absl::Duration> v = {y_absl::Seconds(1), y_absl::Milliseconds(10)};
+// TString s =
+// y_absl::StrJoin(v, ", ", [](TString* out, y_absl::Duration dur) {
+// y_absl::StrAppend(out, y_absl::FormatDuration(dur));
+// });
+// EXPECT_EQ("1s, 10ms", s);
//
// The following standard formatters are provided within this file:
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h
index 6b3faa11ea..8cc36e1c6b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.h
@@ -461,8 +461,7 @@ using EnableSplitIfString =
// first two split strings become the `std::pair` `.first` and `.second`
// members, respectively. The remaining split substrings are discarded. If there
// are less than two split substrings, the empty string is used for the
-// corresponding
-// `std::pair` member.
+// corresponding `std::pair` member.
//
// Example:
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
index f58c65269b..cfc9f7cdb2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
@@ -207,22 +207,11 @@ string_view::size_type string_view::find_last_not_of(
return npos;
}
-// MSVC has non-standard behavior that implicitly creates definitions for static
-// const members. These implicit definitions conflict with explicit out-of-class
-// member definitions that are required by the C++ standard, resulting in
-// LNK1169 "multiply defined" errors at link time. __declspec(selectany) asks
-// MSVC to choose only one definition for the symbol it decorates. See details
-// at https://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx
-#ifdef _MSC_VER
-#define Y_ABSL_STRING_VIEW_SELECTANY __declspec(selectany)
-#else
-#define Y_ABSL_STRING_VIEW_SELECTANY
-#endif
-Y_ABSL_STRING_VIEW_SELECTANY
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr string_view::size_type string_view::npos;
-Y_ABSL_STRING_VIEW_SELECTANY
constexpr string_view::size_type string_view::kMaxSize;
+#endif
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
index 443759051e..3c725fbf5d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
@@ -57,8 +57,9 @@ Y_ABSL_NAMESPACE_END
#error "std::string_view should be used in all configurations"
-#if Y_ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
- (defined(__GNUC__) && !defined(__clang__))
+#if Y_ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
+ (defined(__GNUC__) && !defined(__clang__)) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1928)
#define Y_ABSL_INTERNAL_STRING_VIEW_MEMCMP __builtin_memcmp
#else // Y_ABSL_HAVE_BUILTIN(__builtin_memcmp)
#define Y_ABSL_INTERNAL_STRING_VIEW_MEMCMP memcmp
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h
index c202cc68ba..1d1d16e698 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/strip.h
@@ -34,8 +34,9 @@ Y_ABSL_NAMESPACE_BEGIN
// ConsumePrefix()
//
-// Strips the `expected` prefix from the start of the given string, returning
-// `true` if the strip operation succeeded or false otherwise.
+// Strips the `expected` prefix, if found, from the start of `str`.
+// If the operation succeeded, `true` is returned. If not, `false`
+// is returned and `str` is not modified.
//
// Example:
//
@@ -49,8 +50,9 @@ inline bool ConsumePrefix(y_absl::string_view* str, y_absl::string_view expected
}
// ConsumeSuffix()
//
-// Strips the `expected` suffix from the end of the given string, returning
-// `true` if the strip operation succeeded or false otherwise.
+// Strips the `expected` suffix, if found, from the end of `str`.
+// If the operation succeeded, `true` is returned. If not, `false`
+// is returned and `str` is not modified.
//
// Example:
//
@@ -65,7 +67,7 @@ inline bool ConsumeSuffix(y_absl::string_view* str, y_absl::string_view expected
// StripPrefix()
//
-// Returns a view into the input string 'str' with the given 'prefix' removed,
+// Returns a view into the input string `str` with the given `prefix` removed,
// but leaving the original string intact. If the prefix does not match at the
// start of the string, returns the original string instead.
Y_ABSL_MUST_USE_RESULT inline y_absl::string_view StripPrefix(
@@ -76,7 +78,7 @@ Y_ABSL_MUST_USE_RESULT inline y_absl::string_view StripPrefix(
// StripSuffix()
//
-// Returns a view into the input string 'str' with the given 'suffix' removed,
+// Returns a view into the input string `str` with the given `suffix` removed,
// but leaving the original string intact. If the suffix does not match at the
// end of the string, returns the original string instead.
Y_ABSL_MUST_USE_RESULT inline y_absl::string_view StripSuffix(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h
index f956619957..cb49392ddb 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/substitute.h
@@ -161,8 +161,8 @@ class Arg {
Arg(Hex hex); // NOLINT(runtime/explicit)
Arg(Dec dec); // NOLINT(runtime/explicit)
- // vector<bool>::reference and const_reference require special help to
- // convert to `AlphaNum` because it requires two user defined conversions.
+ // vector<bool>::reference and const_reference require special help to convert
+ // to `Arg` because it requires two user defined conversions.
template <typename T,
y_absl::enable_if_t<
std::is_class<T>::value &&
@@ -176,6 +176,14 @@ class Arg {
// "0x<hex value>". However, in the case of `nullptr`, "NULL" is printed.
Arg(const void* value); // NOLINT(runtime/explicit)
+ // Normal enums are already handled by the integer formatters.
+ // This overload matches only scoped enums.
+ template <typename T,
+ typename = typename std::enable_if<
+ std::is_enum<T>{} && !std::is_convertible<T, int>{}>::type>
+ Arg(T value) // NOLINT(google-explicit-constructor)
+ : Arg(static_cast<typename std::underlying_type<T>::type>(value)) {}
+
Arg(const Arg&) = delete;
Arg& operator=(const Arg&) = delete;
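
The new Arg overload accepts scoped enums by forwarding their underlying integer value; unlike unscoped enums, they have no implicit conversion to int, so previously they could not be passed to Substitute at all. Illustrative usage under that change (Severity and Describe are hypothetical; the TString return type and y_absl namespace follow this fork's conventions):

    #include <util/generic/string.h>
    #include "y_absl/strings/substitute.h"

    enum class Severity : int { kInfo = 0, kWarning = 1, kError = 2 };

    TString Describe(Severity s) {
      // The scoped enum is forwarded as its underlying type, so $0 formats the number.
      return y_absl::Substitute("severity=$0", s);
    }
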
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
index 446b3b2c6b..95787d5605 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
@@ -38,7 +38,7 @@ Y_ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist
// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage we cache them for re-use.
-void ReclaimThreadIdentity(void* v) {
+static void ReclaimThreadIdentity(void* v) {
base_internal::ThreadIdentity* identity =
static_cast<base_internal::ThreadIdentity*>(v);
@@ -48,8 +48,6 @@ void ReclaimThreadIdentity(void* v) {
base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
}
- PerThreadSem::Destroy(identity);
-
// We must explicitly clear the current thread's identity:
// (a) Subsequent (unrelated) per-thread destructors may require an identity.
// We must guarantee a new identity is used in this case (this destructor
@@ -71,7 +69,12 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) {
return (addr + align - 1) & ~(align - 1);
}
-static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
+ PerThreadSem::Init(identity);
+}
+
+static void ResetThreadIdentityBetweenReuse(
+ base_internal::ThreadIdentity* identity) {
base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
pts->next = nullptr;
pts->skip = nullptr;
@@ -116,8 +119,9 @@ static base_internal::ThreadIdentity* NewThreadIdentity() {
identity = reinterpret_cast<base_internal::ThreadIdentity*>(
RoundUp(reinterpret_cast<intptr_t>(allocation),
base_internal::PerThreadSynch::kAlignment));
+ OneTimeInitThreadIdentity(identity);
}
- ResetThreadIdentity(identity);
+ ResetThreadIdentityBetweenReuse(identity);
return identity;
}
@@ -127,7 +131,6 @@ static base_internal::ThreadIdentity* NewThreadIdentity() {
// REQUIRES: CurrentThreadIdentity(false) == nullptr
base_internal::ThreadIdentity* CreateThreadIdentity() {
base_internal::ThreadIdentity* identity = NewThreadIdentity();
- PerThreadSem::Init(identity);
// Associate the value with the current thread, and attach our destructor.
base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
return identity;
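
The restructuring separates one-time setup from per-reuse reset: OneTimeInitThreadIdentity (which now calls PerThreadSem::Init) runs only when a ThreadIdentity block is freshly allocated, ResetThreadIdentityBetweenReuse runs every time a block is handed out, including from the freelist, and the Destroy/destructor paths disappear because cached identities are never torn down. A generic sketch of that allocate-once, reset-on-reuse pattern (Slot, Acquire and Release are hypothetical; the real code guards its freelist with a spinlock, omitted here):

    #include <vector>

    struct Slot {
      bool sem_initialized = false;  // one-time state (stands in for the embedded Waiter)
      int  per_use_state = 0;        // state that must be reset between uses
    };

    static std::vector<Slot*> freelist;  // blocks are cached forever, never destroyed

    Slot* Acquire() {
      Slot* s;
      if (!freelist.empty()) {
        s = freelist.back();
        freelist.pop_back();
      } else {
        s = new Slot();              // fresh allocation
        s->sem_initialized = true;   // one-time init, done exactly once per block
      }
      s->per_use_state = 0;          // reset between reuse, done on every Acquire
      return s;
    }

    void Release(Slot* s) { freelist.push_back(s); }  // no destructor ever runs
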
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h
index a7e22c04ac..79064340e5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.h
@@ -36,10 +36,6 @@ namespace synchronization_internal {
// For private use only.
base_internal::ThreadIdentity* CreateThreadIdentity();
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// For private use only.
-void ReclaimThreadIdentity(void* v);
-
// Returns the ThreadIdentity object representing the calling thread; guaranteed
// to be unique for its lifetime. The returned object will remain valid for the
// program's lifetime; although it may be re-assigned to a subsequent thread.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
index 85299308ad..d581d826af 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
@@ -47,10 +47,6 @@ void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
identity->is_idle.store(false, std::memory_order_relaxed);
}
-void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
- Waiter::GetWaiter(identity)->~Waiter();
-}
-
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
index 5e5c875486..6c7ae2f8c7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
@@ -66,10 +66,6 @@ class PerThreadSem {
// REQUIRES: May only be called by ThreadIdentity.
static void Init(base_internal::ThreadIdentity* identity);
- // Destroy the PerThreadSem associated with "identity".
- // REQUIRES: May only be called by ThreadIdentity.
- static void Destroy(base_internal::ThreadIdentity* identity);
-
// Increments "identity"'s count.
static inline void Post(base_internal::ThreadIdentity* identity);
@@ -81,8 +77,7 @@ class PerThreadSem {
// Permitted callers.
friend class PerThreadSemTest;
friend class y_absl::Mutex;
- friend y_absl::base_internal::ThreadIdentity* CreateThreadIdentity();
- friend void ReclaimThreadIdentity(void* v);
+ friend void OneTimeInitThreadIdentity(y_absl::base_internal::ThreadIdentity*);
};
} // namespace synchronization_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
index b69a0513f6..7c8c7988ae 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
@@ -71,8 +71,6 @@ Waiter::Waiter() {
futex_.store(0, std::memory_order_relaxed);
}
-Waiter::~Waiter() = default;
-
bool Waiter::Wait(KernelTimeout t) {
// Loop until we can atomically decrement futex from a positive
// value, waiting on a futex while we believe it is zero.
@@ -161,18 +159,6 @@ Waiter::Waiter() {
wakeup_count_ = 0;
}
-Waiter::~Waiter() {
- const int err = pthread_mutex_destroy(&mu_);
- if (err != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
- }
-
- const int err2 = pthread_cond_destroy(&cv_);
- if (err2 != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
- }
-}
-
bool Waiter::Wait(KernelTimeout t) {
struct timespec abs_timeout;
if (t.has_timeout()) {
@@ -240,12 +226,6 @@ Waiter::Waiter() {
wakeups_.store(0, std::memory_order_relaxed);
}
-Waiter::~Waiter() {
- if (sem_destroy(&sem_) != 0) {
- Y_ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
- }
-}
-
bool Waiter::Wait(KernelTimeout t) {
struct timespec abs_timeout;
if (t.has_timeout()) {
@@ -363,11 +343,6 @@ Waiter::Waiter() {
wakeup_count_ = 0;
}
-// SRW locks and condition variables do not need to be explicitly destroyed.
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
-// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
-Waiter::~Waiter() = default;
-
bool Waiter::Wait(KernelTimeout t) {
SRWLOCK *mu = WinHelper::GetLock(this);
CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
index 4eb96ee824..17e81655fc 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
@@ -71,9 +71,6 @@ class Waiter {
Waiter(const Waiter&) = delete;
Waiter& operator=(const Waiter&) = delete;
- // Destroy any data to track waits.
- ~Waiter();
-
// Blocks the calling thread until a matching call to `Post()` or
// `t` has passed. Returns `true` if woken (`Post()` called),
// `false` on timeout.
@@ -106,6 +103,12 @@ class Waiter {
#endif
private:
+ // The destructor must not be called since Mutex/CondVar
+ // can use PerThreadSem/Waiter after the thread exits.
+ // Waiter objects are embedded in ThreadIdentity objects,
+ // which are reused via a freelist and are never destroyed.
+ ~Waiter() = delete;
+
#if Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_FUTEX
// Futexes are defined by specification to be 32-bits.
// Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
@@ -136,8 +139,11 @@ class Waiter {
// REQUIRES: WinHelper::GetLock(this) must be held.
void InternalCondVarPoke();
- // We can't include Windows.h in our headers, so we use aligned charachter
+ // We can't include Windows.h in our headers, so we use aligned character
// buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ // SRW locks and condition variables do not need to be explicitly destroyed.
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+ // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
alignas(void*) unsigned char mu_storage_[sizeof(void*)];
alignas(void*) unsigned char cv_storage_[sizeof(void*)];
int waiter_count_;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
index 09b7acbccd..27e6a84f7a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
@@ -109,7 +109,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
bool locking, bool trylock,
bool read_lock);
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
@@ -1744,23 +1744,33 @@ Y_ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
Y_ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
}
-// The zap_desig_waker bitmask is used to clear the designated waker flag in
-// the mutex if this thread has blocked, and therefore may be the designated
-// waker.
-static const intptr_t zap_desig_waker[] = {
- ~static_cast<intptr_t>(0), // not blocked
- ~static_cast<intptr_t>(
- kMuDesig) // blocked; turn off the designated waker bit
-};
+// Clears the designated waker flag in the mutex if this thread has blocked, and
+// therefore may be the designated waker.
+static intptr_t ClearDesignatedWakerMask(int flag) {
+ assert(flag >= 0);
+ assert(flag <= 1);
+ switch (flag) {
+ case 0: // not blocked
+ return ~static_cast<intptr_t>(0);
+ case 1: // blocked; turn off the designated waker bit
+ return ~static_cast<intptr_t>(kMuDesig);
+ }
+ Y_ABSL_INTERNAL_UNREACHABLE;
+}
-// The ignore_waiting_writers bitmask is used to ignore the existence
-// of waiting writers if a reader that has already blocked once
-// wakes up.
-static const intptr_t ignore_waiting_writers[] = {
- ~static_cast<intptr_t>(0), // not blocked
- ~static_cast<intptr_t>(
- kMuWrWait) // blocked; pretend there are no waiting writers
-};
+// Conditionally ignores the existence of waiting writers if a reader that has
+// already blocked once wakes up.
+static intptr_t IgnoreWaitingWritersMask(int flag) {
+ assert(flag >= 0);
+ assert(flag <= 1);
+ switch (flag) {
+ case 0: // not blocked
+ return ~static_cast<intptr_t>(0);
+ case 1: // blocked; pretend there are no waiting writers
+ return ~static_cast<intptr_t>(kMuWrWait);
+ }
+ Y_ABSL_INTERNAL_UNREACHABLE;
+}
// Internal version of LockWhen(). See LockSlowWithDeadline()
Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
@@ -1852,8 +1862,10 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
bool unlock = false;
if ((v & how->fast_need_zero) == 0 && // try fast acquire
mu_.compare_exchange_strong(
- v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
- how->fast_add,
+ v,
+ (how->fast_or |
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+ how->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
if (cond == nullptr ||
EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
@@ -1927,9 +1939,10 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
CheckForMutexCorruption(v, "Lock");
if ((v & waitp->how->slow_need_zero) == 0) {
if (mu_.compare_exchange_strong(
- v, (waitp->how->fast_or |
- (v & zap_desig_waker[flags & kMuHasBlocked])) +
- waitp->how->fast_add,
+ v,
+ (waitp->how->fast_or |
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+ waitp->how->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
if (waitp->cond == nullptr ||
EvalConditionAnnotated(waitp->cond, this, true, false,
@@ -1946,8 +1959,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
- intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
- kMuWait;
+ intptr_t nv =
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
+ kMuWait;
Y_ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
nv |= kMuWrWait;
@@ -1961,12 +1975,13 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->thread->waitp = nullptr;
}
} else if ((v & waitp->how->slow_inc_need_zero &
- ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+ IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
// This is a reader that needs to increment the reader count,
// but the count is currently held in the last waiter.
if (mu_.compare_exchange_strong(
- v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
- kMuReader,
+ v,
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+ kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v);
h->readers += kMuOne; // inc reader count in waiter
@@ -1987,8 +2002,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
}
} else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
mu_.compare_exchange_strong(
- v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
- kMuWait,
+ v,
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+ kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v);
PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
@@ -2315,19 +2331,21 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
- int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
- bool cond_waiter = wake_list->cond_waiter;
+ int64_t wait_cycles = 0;
+ int64_t now = base_internal::CycleClock::Now();
do {
+ // Sample lock contention events only if the waiter was trying to acquire
+ // the lock, not waiting on a condition variable or Condition.
+ if (!wake_list->cond_waiter) {
+ wait_cycles += (now - wake_list->waitp->contention_start_cycles);
+ wake_list->waitp->contention_start_cycles = now;
+ }
wake_list = Wakeup(wake_list); // wake waiters
} while (wake_list != kPerThreadSynchNull);
- if (!cond_waiter) {
- // Sample lock contention events only if the (first) waiter was trying to
- // acquire the lock, not waiting on a condition variable or Condition.
- int64_t wait_cycles =
- base_internal::CycleClock::Now() - enqueue_timestamp;
+ if (wait_cycles > 0) {
mutex_tracer("slow release", this, wait_cycles);
Y_ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
- submit_profile_data(enqueue_timestamp);
+ submit_profile_data(wait_cycles);
Y_ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
}
}
@@ -2492,9 +2510,9 @@ void CondVar::Remove(PerThreadSynch *s) {
// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
// the logging code, or via a Condition function) and might potentially attempt
// to block this thread. That would be a problem if the thread were already on
-// a the condition variable waiter queue. Thus, we use the waitp->cv_word
-// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
-// condition variable queue just before the mutex is to be unlocked, and (most
+// a condition variable waiter queue. Thus, we use the waitp->cv_word to tell
+// the unlock code to call CondVarEnqueue() to queue the thread on the condition
+// variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the
// mutex code.
static void CondVarEnqueue(SynchWaitParams *waitp) {
@@ -2557,6 +2575,23 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
while (waitp.thread->state.load(std::memory_order_acquire) ==
PerThreadSynch::kQueued) {
if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
+ // DecrementSynchSem returned due to timeout.
+ // Now we will either (1) remove ourselves from the wait list in Remove
+ // below, in which case Remove will set thread.state = kAvailable and
+ // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
+ // has removed us concurrently and is calling Wakeup, which will set
+ // thread.state = kAvailable and post to the semaphore.
+ // It's important to reset the timeout for the case (2) because otherwise
+ // we can live-lock in this loop since DecrementSynchSem will always
+ // return immediately due to timeout, but Signal/SignalAll may not have
+ // set thread.state = kAvailable yet (and may not be scheduled due to
+ // thread priorities or other scheduler artifacts).
+ // Note this could also be resolved if Signal/SignalAll would set
+ // thread.state = kAvailable while holding the wait list spin lock.
+ // But this can't be easily done for SignalAll since it grabs the whole
+ // wait list with a single compare-exchange and does not really grab
+ // the spin lock.
+ t = KernelTimeout::Never();
this->Remove(waitp.thread);
rc = true;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
index fab53e867b..1cadd01bd8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
@@ -174,9 +174,12 @@ class Y_ABSL_LOCKABLE Mutex {
// Mutex::AssertHeld()
//
- // Return immediately if this thread holds the `Mutex` exclusively (in write
- // mode). Otherwise, may report an error (typically by crashing with a
- // diagnostic), or may return immediately.
+ // Require that the mutex be held exclusively (write mode) by this thread.
+ //
+ // If the mutex is not currently held by this thread, this function may report
+ // an error (typically by crashing with a diagnostic) or it may do nothing.
+ // This function is intended only as a tool to assist debugging; it doesn't
+ // guarantee correctness.
void AssertHeld() const Y_ABSL_ASSERT_EXCLUSIVE_LOCK();
// ---------------------------------------------------------------------------
@@ -236,9 +239,13 @@ class Y_ABSL_LOCKABLE Mutex {
// Mutex::AssertReaderHeld()
//
- // Returns immediately if this thread holds the `Mutex` in at least shared
- // mode (read mode). Otherwise, may report an error (typically by
- // crashing with a diagnostic), or may return immediately.
+ // Require that the mutex be held at least in shared mode (read mode) by this
+ // thread.
+ //
+ // If the mutex is not currently held by this thread, this function may report
+ // an error (typically by crashing with a diagnostic) or it may do nothing.
+ // This function is intended only as a tool to assist debugging; it doesn't
+ // guarantee correctness.
void AssertReaderHeld() const Y_ABSL_ASSERT_SHARED_LOCK();
// Mutex::WriterLock()
@@ -984,14 +991,15 @@ inline Condition::Condition(const T *object,
// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a mutex is
-// contended. The callback is given the y_absl/base/cycleclock.h timestamp when
-// waiting began.
+// contended. The callback is given the cycles for which waiting happened (as
+// measured by //y_absl/base/internal/cycleclock.h, and which may not
+// be real "cycle" counts.)
//
// Calls to this function do not race or block, but there is no ordering
// guaranteed between calls to this function and call to the provided hook.
// In particular, the previously registered hook may still be called for some
// time after this function returns.
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
// Register a hook for Mutex tracing.
//
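
With this change the registered hook receives the accumulated contention, in CycleClock cycles, rather than the timestamp at which waiting began, so a hook that previously computed "now minus timestamp" should now consume the value directly. An illustrative registration (the counter and function names are hypothetical):

    #include <atomic>
    #include <cstdint>
    #include "y_absl/synchronization/mutex.h"

    static std::atomic<int64_t> total_contention_cycles{0};

    void MutexWaitProfiler(int64_t wait_cycles) {
      // After this patch the argument is already a duration in cycles, not a start time.
      total_contention_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
    }

    void InstallProfiler() { y_absl::RegisterMutexProfiler(&MutexWaitProfiler); }
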
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h
index 0794cc35e9..e4a77271e9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.h
@@ -22,7 +22,7 @@
// The `Notification` object maintains a private boolean "notified" state that
// transitions to `true` at most once. The `Notification` class provides the
// following primary member functions:
-// * `HasBeenNotified() `to query its state
+// * `HasBeenNotified()` to query its state
// * `WaitForNotification*()` to have threads wait until the "notified" state
// is `true`.
// * `Notify()` to set the notification's "notified" state to `true` and
@@ -52,6 +52,7 @@
#include <atomic>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/macros.h"
#include "y_absl/synchronization/mutex.h"
#include "y_absl/time/time.h"
@@ -74,7 +75,7 @@ class Notification {
// Notification::HasBeenNotified()
//
// Returns the value of the notification's internal "notified" state.
- bool HasBeenNotified() const {
+ Y_ABSL_MUST_USE_RESULT bool HasBeenNotified() const {
return HasBeenNotifiedInternal(&this->notified_yet_);
}
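
Marking HasBeenNotified() with Y_ABSL_MUST_USE_RESULT makes discarding its return value a warning, since polling the flag and ignoring the answer is almost always a bug. Typical intended usage, as a hedged sketch (Example is a hypothetical function):

    #include <thread>
    #include "y_absl/synchronization/notification.h"

    void Example() {
      y_absl::Notification ready;
      std::thread worker([&ready] {
        // ... publish some state ...
        ready.Notify();                     // flips the "notified" state exactly once
      });
      ready.WaitForNotification();          // blocks until Notify() has run
      bool seen = ready.HasBeenNotified();  // return value must be used (true here)
      (void)seen;
      worker.join();
    }
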
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
index cc646c8870..c9a3b73b73 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
@@ -766,13 +766,14 @@ void AppendNumberUnit(TString* out, double n, DisplayUnit unit) {
// is non-zero.
// Unlike Go, we format the zero duration as 0, with no unit.
TString FormatDuration(Duration d) {
- const Duration min_duration = Seconds(kint64min);
- if (d == min_duration) {
+ constexpr Duration kMinDuration = Seconds(kint64min);
+ TString s;
+ if (d == kMinDuration) {
// Avoid needing to negate kint64min by directly returning what the
// following code should produce in that case.
- return "-2562047788015215h30m8s";
+ s = "-2562047788015215h30m8s";
+ return s;
}
- TString s;
if (d < ZeroDuration()) {
s.append("-");
d = -d;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h
index f2385d6754..b5cd52c7cf 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -84,14 +84,13 @@ CONSTEXPR_F bool is_leap_year(year_t y) noexcept {
return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
}
CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
- return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+ const int yi = static_cast<int>((y + (m > 2)) % 400);
+ return yi < 0 ? yi + 400 : yi;
}
-CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
- const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_century(int yi) noexcept {
return 36524 + (yi == 0 || yi > 300);
}
-CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
- const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_4years(int yi) noexcept {
return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
}
CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
@@ -133,17 +132,22 @@ CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh,
}
}
if (d > 365) {
+ int yi = year_index(ey, m); // Index into Gregorian 400 year cycle.
for (;;) {
- int n = days_per_century(ey, m);
+ int n = days_per_century(yi);
if (d <= n) break;
d -= n;
ey += 100;
+ yi += 100;
+ if (yi >= 400) yi -= 400;
}
for (;;) {
- int n = days_per_4years(ey, m);
+ int n = days_per_4years(yi);
if (d <= n) break;
d -= n;
ey += 4;
+ yi += 4;
+ if (yi >= 400) yi -= 400;
}
for (;;) {
int n = days_per_year(ey, m);
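
The rewrite keeps a running index yi into the Gregorian 400-year cycle instead of recomputing year_index (and its wide % 400) on every loop iteration, and replaces the double-modulo normalization with a single conditional add. The two formulations agree for negative and positive years alike; a standalone check mirroring both helpers (YearIndexOld and YearIndexNew are hypothetical copies):

    #include <cassert>
    #include <cstdint>

    using year_t = std::int64_t;

    int YearIndexOld(year_t y, int m) {
      return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
    }

    int YearIndexNew(year_t y, int m) {
      const int yi = static_cast<int>((y + (m > 2)) % 400);
      return yi < 0 ? yi + 400 : yi;  // % of a negative value is negative in C++
    }

    int main() {
      for (year_t y = -1000; y <= 1000; ++y) {
        for (int m = 1; m <= 12; ++m) {
          assert(YearIndexOld(y, m) == YearIndexNew(y, m));
        }
      }
    }
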
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
index e3df4abd21..99009dfc30 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
@@ -120,7 +120,7 @@ using EnableIfFloat =
// Duration
//
-// The `y_absl::Duration` class represents a signed, fixed-length span of time.
+// The `y_absl::Duration` class represents a signed, fixed-length amount of time.
// A `Duration` is generated using a unit-specific factory function, or is
// the result of subtracting one `y_absl::Time` from another. Durations behave
// like unit-safe integers and they support all the natural integer-like
@@ -162,7 +162,7 @@ class Duration {
constexpr Duration() : rep_hi_(0), rep_lo_(0) {} // zero-length duration
// Copyable.
-#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1910
+#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1930
// Explicitly defining the constexpr copy constructor avoids an MSVC bug.
constexpr Duration(const Duration& d)
: rep_hi_(d.rep_hi_), rep_lo_(d.rep_lo_) {}
@@ -579,7 +579,7 @@ bool ParseDuration(y_absl::string_view dur_string, Duration* d);
// AbslParseFlag()
//
-// Parses a command-line flag string representation `text` into a a Duration
+// Parses a command-line flag string representation `text` into a Duration
// value. Duration flags must be specified in a format that is valid input for
// `y_absl::ParseDuration()`.
bool AbslParseFlag(y_absl::string_view text, Duration* dst, TString* error);
@@ -750,23 +750,24 @@ constexpr Time UnixEpoch() { return Time(); }
constexpr Time UniversalEpoch() {
// 719162 is the number of days from 0001-01-01 to 1970-01-01,
// assuming the Gregorian calendar.
- return Time(time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, 0U));
+ return Time(
+ time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, uint32_t{0}));
}
// InfiniteFuture()
//
// Returns an `y_absl::Time` that is infinitely far in the future.
constexpr Time InfiniteFuture() {
- return Time(
- time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U));
+ return Time(time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
+ ~uint32_t{0}));
}
// InfinitePast()
//
// Returns an `y_absl::Time` that is infinitely far in the past.
constexpr Time InfinitePast() {
- return Time(
- time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U));
+ return Time(time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(),
+ ~uint32_t{0}));
}
// FromUnixNanos()
@@ -1422,14 +1423,17 @@ constexpr int64_t GetRepHi(Duration d) { return d.rep_hi_; }
constexpr uint32_t GetRepLo(Duration d) { return d.rep_lo_; }
// Returns true iff d is positive or negative infinity.
-constexpr bool IsInfiniteDuration(Duration d) { return GetRepLo(d) == ~0U; }
+constexpr bool IsInfiniteDuration(Duration d) {
+ return GetRepLo(d) == ~uint32_t{0};
+}
// Returns an infinite Duration with the opposite sign.
// REQUIRES: IsInfiniteDuration(d)
constexpr Duration OppositeInfinity(Duration d) {
return GetRepHi(d) < 0
- ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U)
- : MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U);
+ ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~uint32_t{0})
+ : MakeDuration((std::numeric_limits<int64_t>::min)(),
+ ~uint32_t{0});
}
// Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
@@ -1568,7 +1572,7 @@ constexpr Duration operator-(Duration d) {
constexpr Duration InfiniteDuration() {
return time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
- ~0U);
+ ~uint32_t{0});
}
constexpr Duration FromChrono(const std::chrono::nanoseconds& d) {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
index 9b20551d2e..34ae94b275 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
@@ -91,7 +91,15 @@ class optional_data_dtor_base {
void destruct() noexcept {
if (engaged_) {
+ // `data_` must be initialized if `engaged_` is true.
+#if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
data_.~T();
+#if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
engaged_ = false;
}
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
index 606d985526..03fb92016a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
@@ -16,8 +16,8 @@
// separate file to avoid cluttering the top of the API header with
// implementation details.
-#ifndef Y_ABSL_TYPES_variant_internal_H_
-#define Y_ABSL_TYPES_variant_internal_H_
+#ifndef Y_ABSL_TYPES_VARIANT_INTERNAL_H_
+#define Y_ABSL_TYPES_VARIANT_INTERNAL_H_
#include <cassert>
#include <cstddef>
@@ -1643,4 +1643,4 @@ Y_ABSL_NAMESPACE_END
} // namespace y_absl
#endif // !defined(Y_ABSL_USES_STD_VARIANT)
-#endif // Y_ABSL_TYPES_variant_internal_H_
+#endif // Y_ABSL_TYPES_VARIANT_INTERNAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
index 6c988a64ce..2a71ecb6c4 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
@@ -282,15 +282,16 @@ class optional : private optional_internal::optional_data<T>,
optional& operator=(optional&& src) = default;
// Value assignment operators
- template <
- typename U = T,
- typename = typename std::enable_if<y_absl::conjunction<
- y_absl::negation<
- std::is_same<optional<T>, typename std::decay<U>::type>>,
- y_absl::negation<
- y_absl::conjunction<std::is_scalar<T>,
- std::is_same<T, typename std::decay<U>::type>>>,
- std::is_constructible<T, U>, std::is_assignable<T&, U>>::value>::type>
+ template <typename U = T,
+ int&..., // Work around an internal compiler error in GCC 5 to 10.
+ typename = typename std::enable_if<y_absl::conjunction<
+ y_absl::negation<
+ std::is_same<optional<T>, typename std::decay<U>::type> >,
+ y_absl::negation<y_absl::conjunction<
+ std::is_scalar<T>,
+ std::is_same<T, typename std::decay<U>::type> > >,
+ std::is_constructible<T, U>,
+ std::is_assignable<T&, U> >::value>::type>
optional& operator=(U&& v) {
this->assign(std::forward<U>(v));
return *this;
@@ -298,13 +299,14 @@ class optional : private optional_internal::optional_data<T>,
template <
typename U,
+ int&..., // Work around an internal compiler error in GCC 5 to 10.
typename = typename std::enable_if<y_absl::conjunction<
- y_absl::negation<std::is_same<T, U>>,
+ y_absl::negation<std::is_same<T, U> >,
std::is_constructible<T, const U&>, std::is_assignable<T&, const U&>,
y_absl::negation<
optional_internal::
is_constructible_convertible_assignable_from_optional<
- T, U>>>::value>::type>
+ T, U> > >::value>::type>
optional& operator=(const optional<U>& rhs) {
if (rhs) {
this->assign(*rhs);
@@ -315,13 +317,14 @@ class optional : private optional_internal::optional_data<T>,
}
template <typename U,
+ int&..., // Work around an internal compiler error in GCC 5 to 10.
typename = typename std::enable_if<y_absl::conjunction<
- y_absl::negation<std::is_same<T, U>>, std::is_constructible<T, U>,
- std::is_assignable<T&, U>,
+ y_absl::negation<std::is_same<T, U> >,
+ std::is_constructible<T, U>, std::is_assignable<T&, U>,
y_absl::negation<
optional_internal::
is_constructible_convertible_assignable_from_optional<
- T, U>>>::value>::type>
+ T, U> > >::value>::type>
optional& operator=(optional<U>&& rhs) {
if (rhs) {
this->assign(std::move(*rhs));
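
The inserted `int&...` pack is the same "explicit argument barrier" idiom Abseil uses in MakeSpan below: a pack of lvalue-reference-to-int template parameters can never be deduced, so it is invisible to callers, but its presence reportedly sidesteps an internal compiler error that GCC 5 through 10 hit on these SFINAE-heavy assignment operators. A minimal sketch of the shape (PassThrough is a hypothetical function):

    #include <type_traits>
    #include <utility>

    // The int&... pack is always empty for ordinary calls; it only changes how the
    // compiler processes the template, which is the point of the workaround.
    template <typename U, int&...,
              typename = typename std::enable_if<std::is_integral<U>::value>::type>
    U PassThrough(U&& v) {
      return std::forward<U>(v);
    }

    static_assert(std::is_same<decltype(PassThrough(42)), int>::value, "");
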
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
index 5961eb9927..ec5338601d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
@@ -664,7 +664,8 @@ constexpr Span<T> MakeSpan(T* ptr, size_t size) noexcept {
template <int&... ExplicitArgumentBarrier, typename T>
Span<T> MakeSpan(T* begin, T* end) noexcept {
- return Y_ABSL_HARDENING_ASSERT(begin <= end), Span<T>(begin, end - begin);
+ return Y_ABSL_HARDENING_ASSERT(begin <= end),
+ Span<T>(begin, static_cast<size_t>(end - begin));
}
template <int&... ExplicitArgumentBarrier, typename C>
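
As elsewhere in this change, the MakeSpan(begin, end) overload only makes the ptrdiff_t-to-size_t narrowing explicit once the hardening assert has established begin <= end. Illustrative usage of the pointer-pair overload (Sum and Example are hypothetical; the y_absl namespace follows this fork):

    #include <vector>
    #include "y_absl/types/span.h"

    int Sum(y_absl::Span<const int> values) {
      int total = 0;
      for (int v : values) total += v;
      return total;
    }

    int Example() {
      std::vector<int> v = {1, 2, 3, 4};
      // Pointer-pair form exercised by the patched overload.
      return Sum(y_absl::MakeSpan(v.data(), v.data() + v.size()));
    }
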