author     thegeorg <thegeorg@yandex-team.com>  2023-08-20 23:36:15 +0300
committer  thegeorg <thegeorg@yandex-team.com>  2023-08-21 00:54:32 +0300
commit     4ffd2d398873ff2e3f1c28fbb1d647d26a9600d1 (patch)
tree       f023ac7871845812356d6ace5e07d059bdf270b4
parent     d619e9fffe040fe2f8b4940b1a72cc1757538c8c (diff)
download   ydb-4ffd2d398873ff2e3f1c28fbb1d647d26a9600d1.tar.gz
Update contrib/restricted/abseil-cpp-tstring to 20230802.0
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h | 63
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h | 198
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h | 27
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc | 89
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h | 138
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc | 35
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc | 104
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc | 17
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h | 22
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc | 55
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/prefetch.h | 198
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h | 83
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h | 246
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common_policy_traits.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h | 24
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h | 46
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h | 111
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h | 53
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc | 99
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h | 673
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/crc32c.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/cpu_detect.cc | 22
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.cc | 39
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.h | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc32_x86_arm_combined_simd.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_internal.h | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_memcpy_x86_64.cc | 36
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_x86_arm_combined.cc | 16
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc | 48
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc | 82
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc | 62
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h | 31
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag_msvc.inc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/parse.h | 17
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.cc | 52
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.h | 42
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.cc | 45
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.cc | 295
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.h | 108
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage.cc | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/any_invocable.h | 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/any_invocable.h | 61
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h | 53
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h | 421
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h | 75
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc | 77
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/distribution_test_util.cc | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/generate_real.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/platform.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_engine.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/uniform_helper.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h | 36
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h | 13
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h | 30
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc | 60
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc | 25
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h | 37
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc | 24
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc | 42
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc | 17
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc | 66
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h | 35
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h | 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc | 23
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc | 79
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h | 116
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc | 41
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h | 25
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/constexpr_parser.h | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h | 62
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc | 87
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h | 19
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc | 276
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h | 7
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc | 57
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h | 98
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc | 15
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc | 30
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.darwin-x86_64.txt | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-aarch64.txt | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-x86_64.txt | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.windows-x86_64.txt | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc | 5
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h | 106
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc | 111
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.h | 63
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc | 225
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h | 236
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc | 20
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc | 167
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.h | 60
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc | 122
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.h | 65
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc | 91
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.h | 56
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc | 403
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h | 132
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc | 42
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.h | 90
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc | 151
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.h | 70
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc | 818
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h | 270
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc | 13
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc | 50
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h | 1
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc | 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc | 14
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc | 597
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h | 25
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc | 84
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h | 9
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc | 130
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc | 68
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc | 11
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h | 130
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h | 52
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h | 28
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h | 3
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make | 4
197 files changed, 6161 insertions, 4184 deletions
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
index 0a5a253c2d..b97a3fefb0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/container.h
@@ -1131,7 +1131,7 @@ c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
// to test if any element in the sorted container contains a value equivalent to
// 'value'.
template <typename Sequence, typename T>
-bool c_binary_search(Sequence&& sequence, const T& value) {
+bool c_binary_search(const Sequence& sequence, const T& value) {
return std::binary_search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
value);
@@ -1140,7 +1140,8 @@ bool c_binary_search(Sequence&& sequence, const T& value) {
// Overload of c_binary_search() for performing a `comp` comparison other than
// the default `operator<`.
template <typename Sequence, typename T, typename LessThan>
-bool c_binary_search(Sequence&& sequence, const T& value, LessThan&& comp) {
+bool c_binary_search(const Sequence& sequence, const T& value,
+ LessThan&& comp) {
return std::binary_search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
value, std::forward<LessThan>(comp));
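
[editor note] The hunk above narrows c_binary_search() from a forwarding reference (Sequence&&) to const Sequence&, reflecting that std::binary_search never mutates its range. A minimal usage sketch (the wrapper function below is illustrative, not part of the diff):

    #include <vector>
    #include "y_absl/algorithm/container.h"

    // Requires `sorted` to be ordered by operator< (the default comparison).
    bool ContainsValue(const std::vector<int>& sorted, int value) {
      return y_absl::c_binary_search(sorted, value);
    }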
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make
index 26a3c44152..8981153caa 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/algorithm/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20230125.3)
+VERSION(20230802.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230125.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230802.0.tar.gz)
NO_RUNTIME()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
index 420da2603d..f507057b1e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/attributes.h
@@ -211,11 +211,20 @@
// out of bounds or does other scary things with memory.
// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
// https://gcc.gnu.org/gcc-4.8/changes.html
-#if Y_ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
+#if defined(Y_ABSL_HAVE_ADDRESS_SANITIZER) && \
+ Y_ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#elif defined(_MSC_VER) && _MSC_VER >= 1928
+#elif defined(Y_ABSL_HAVE_ADDRESS_SANITIZER) && defined(_MSC_VER) && \
+ _MSC_VER >= 1928
// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
+#elif defined(Y_ABSL_HAVE_HWADDRESS_SANITIZER) && Y_ABSL_HAVE_ATTRIBUTE(no_sanitize)
+// HWAddressSanitizer is a sanitizer similar to AddressSanitizer, which uses CPU
+// features to detect similar bugs with less CPU and memory overhead.
+// NOTE: GCC supports HWAddressSanitizer(hwasan) since 11.
+// https://gcc.gnu.org/gcc-11/changes.html
+#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS \
+ __attribute__((no_sanitize("hwaddress")))
#else
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
#endif
@@ -265,7 +274,7 @@
//
// Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
-#if Y_ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#if Y_ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__llvm__)
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
#else
#define Y_ABSL_ATTRIBUTE_NO_SANITIZE_CFI
@@ -322,8 +331,8 @@
// This functionality is supported by GNU linker.
#ifndef Y_ABSL_ATTRIBUTE_SECTION_VARIABLE
#ifdef _AIX
-// __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo
-// op which includes an additional integer as part of its syntax indcating
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// psudo op which includes an additional integer as part of its syntax indcating
// alignment. If data fall under different alignments then you might get a
// compilation error indicating a `Section type conflict`.
#define Y_ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
@@ -676,6 +685,28 @@
#define Y_ABSL_DEPRECATED(message)
#endif
+// When deprecating Abseil code, it is sometimes necessary to turn off the
+// warning within Abseil, until the deprecated code is actually removed. The
+// deprecated code can be surrounded with these directives to acheive that
+// result.
+//
+// class Y_ABSL_DEPRECATED("Use Bar instead") Foo;
+//
+// Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+// Baz ComputeBazFromFoo(Foo f);
+// Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#if defined(__GNUC__) || defined(__clang__)
+// Clang also supports these GCC pragmas.
+#define Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+ _Pragma("GCC diagnostic pop")
+#else
+#define Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+#define Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#endif // defined(__GNUC__) || defined(__clang__)
+
// Y_ABSL_CONST_INIT
//
// A variable declaration annotated with the `Y_ABSL_CONST_INIT` attribute will
@@ -779,4 +810,26 @@
#define Y_ABSL_ATTRIBUTE_TRIVIAL_ABI
#endif
+// Y_ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+//
+// Indicates a data member can be optimized to occupy no space (if it is empty)
+// and/or its tail padding can be used for other members.
+//
+// For code that is assured to only build with C++20 or later, prefer using
+// the standard attribute `[[no_unique_address]]` directly instead of this
+// macro.
+//
+// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#c20-no_unique_address
+// Current versions of MSVC have disabled `[[no_unique_address]]` since it
+// breaks ABI compatibility, but offers `[[msvc::no_unique_address]]` for
+// situations when it can be assured that it is desired. Since Abseil does not
+// claim ABI compatibility in mixed builds, we can offer it unconditionally.
+#if defined(_MSC_VER) && _MSC_VER >= 1929
+#define Y_ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
+#elif Y_ABSL_HAVE_CPP_ATTRIBUTE(no_unique_address)
+#define Y_ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define Y_ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+#endif
+
#endif // Y_ABSL_BASE_ATTRIBUTES_H_
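
[editor note] The Y_ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS macro added above selects [[msvc::no_unique_address]] on MSVC 1929+, the standard [[no_unique_address]] where available, and expands to nothing otherwise. A minimal sketch of the layout optimization it enables (the types below are illustrative):

    struct EmptyDeleter {};  // stateless policy, no data members

    struct Holder {
      // When the attribute is active, the empty member can occupy no
      // storage, so sizeof(Holder) can be just sizeof(int*).
      Y_ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS EmptyDeleter deleter;
      int* ptr;
    };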
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h
index 93239f29e4..5121c2f8de 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/call_once.h
@@ -123,7 +123,7 @@ class SchedulingHelper {
private:
base_internal::SchedulingMode mode_;
- bool guard_result_;
+ bool guard_result_ = false;
};
// Bit patterns for call_once state machine values. Internal implementation
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
index e78b28c53b..caa3694398 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/casts.h
@@ -149,16 +149,16 @@ using std::bit_cast;
#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
-template <typename Dest, typename Source,
- typename std::enable_if<
- sizeof(Dest) == sizeof(Source) &&
- type_traits_internal::is_trivially_copyable<Source>::value &&
- type_traits_internal::is_trivially_copyable<Dest>::value
+template <
+ typename Dest, typename Source,
+ typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+ std::is_trivially_copyable<Source>::value &&
+ std::is_trivially_copyable<Dest>::value
#if !Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast)
- && std::is_default_constructible<Dest>::value
+ && std::is_default_constructible<Dest>::value
#endif // !Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast)
- ,
- int>::type = 0>
+ ,
+ int>::type = 0>
#if Y_ABSL_HAVE_BUILTIN(__builtin_bit_cast) && (!defined(__CUDACC__) || CUDA_VERSION >= 11010)
inline constexpr Dest bit_cast(const Source& source) {
return __builtin_bit_cast(Dest, source);
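
[editor note] The hunk above swaps Abseil's internal is_trivially_copyable shim for std::is_trivially_copyable in bit_cast's constraint. A minimal usage sketch, assuming a platform where float and uint32_t are both 4 bytes:

    #include <cstdint>
    #include "y_absl/base/casts.h"

    // Both types are trivially copyable and equal in size, so the
    // enable_if constraint shown above is satisfied.
    uint32_t FloatBits(float f) {
      return y_absl::bit_cast<uint32_t>(f);
    }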
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
index 1b8c1ece56..2b3ac11bf9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/config.h
@@ -111,8 +111,8 @@
//
// LTS releases can be obtained from
// https://github.com/abseil/abseil-cpp/releases.
-#define Y_ABSL_LTS_RELEASE_VERSION 20230125
-#define Y_ABSL_LTS_RELEASE_PATCH_LEVEL 3
+#define Y_ABSL_LTS_RELEASE_VERSION 20230802
+#define Y_ABSL_LTS_RELEASE_PATCH_LEVEL 0
// Helper macro to convert a CPP variable to a string literal.
#define Y_ABSL_INTERNAL_DO_TOKEN_STR(x) #x
@@ -237,15 +237,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
//
// Checks whether `std::is_trivially_destructible<T>` is supported.
-//
-// Notes: All supported compilers using libc++ support this feature, as does
-// gcc >= 4.8.1 using libstdc++, and Visual Studio.
#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
#error Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
-#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \
- (defined(__clang__) && __clang_major__ >= 15) || \
- (!defined(__clang__) && defined(__GLIBCXX__) && \
- Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8))
#define Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
#endif
@@ -253,36 +246,26 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
//
// Checks whether `std::is_trivially_default_constructible<T>` and
// `std::is_trivially_copy_constructible<T>` are supported.
+#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+#error Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#else
+#define Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#endif
// Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
//
// Checks whether `std::is_trivially_copy_assignable<T>` is supported.
-
-// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with
-// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC).
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
-#error Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
-#elif defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
-#error Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set
-#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
- (defined(__clang__) && __clang_major__ >= 15) || \
- (!defined(__clang__) && \
- ((Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \
- (Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \
- defined(_LIBCPP_VERSION)))) || \
- (defined(_MSC_VER) && !defined(__NVCC__) && !defined(__clang__))
-#define Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+#error Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#else
#define Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
// Y_ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
//
// Checks whether `std::is_trivially_copyable<T>` is supported.
-//
-// Notes: Clang 15+ with libc++ supports these features, GCC hasn't been tested.
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE)
+#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
#error Y_ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set
-#elif defined(__clang__) && (__clang_major__ >= 15)
#define Y_ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#endif
@@ -429,7 +412,7 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \
defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
- defined(__QNX__)
+ defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
#define Y_ABSL_HAVE_MMAP 1
#endif
@@ -441,7 +424,7 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__VXWORKS__)
#define Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
@@ -460,7 +443,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// POSIX.1-2001.
#ifdef Y_ABSL_HAVE_SCHED_YIELD
#error Y_ABSL_HAVE_SCHED_YIELD cannot be directly set
-#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \
+ defined(__VXWORKS__)
#define Y_ABSL_HAVE_SCHED_YIELD 1
#endif
@@ -475,7 +459,7 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// platforms.
#ifdef Y_ABSL_HAVE_SEMAPHORE_H
#error Y_ABSL_HAVE_SEMAPHORE_H cannot be directly set
-#elif defined(__linux__) || defined(__ros__)
+#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__)
#define Y_ABSL_HAVE_SEMAPHORE_H 1
#endif
@@ -503,6 +487,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__Fuchsia__)
// Signals don't exist on fuchsia.
#elif defined(__native_client__)
+// Signals don't exist on hexagon/QuRT
+#elif defined(__hexagon__)
#else
// other standard libraries
#define Y_ABSL_HAVE_ALARM 1
@@ -536,41 +522,29 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error "y_absl endian detection needs to be set up for your compiler"
#endif
-// macOS < 10.13 and iOS < 11 don't let you use <any>, <optional>, or <variant>
-// even though the headers exist and are publicly noted to work, because the
-// libc++ shared library shipped on the system doesn't have the requisite
-// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and
+// macOS < 10.13 and iOS < 12 don't support <any>, <optional>, or <variant>
+// because the libc++ shared library shipped on the system doesn't have the
+// requisite exported symbols. See
+// https://github.com/abseil/abseil-cpp/issues/207 and
// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
//
// libc++ spells out the availability requirements in the file
// llvm-project/libcxx/include/__config via the #define
-// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
-//
-// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14
-// and iOS < 12 in the libc++ headers. This was corrected by
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. The set of versions has been
+// modified a few times, via
// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
-// which subsequently made it into the XCode 12.5 release. We need to match the
-// old (incorrect) conditions when built with old XCode, but can use the
-// corrected earlier versions with new XCode.
-#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
- ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \
- ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
- (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \
- (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \
- (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \
- (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \
- ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
- (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
- (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
- (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))))
+// and
+// https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a
+// The second has the actually correct versions, thus, is what we copy here.
+#if defined(__APPLE__) && \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
#define Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else
#define Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
@@ -578,30 +552,28 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// Y_ABSL_HAVE_STD_ANY
//
-// Checks whether C++17 std::any is available by checking whether <any> exists.
+// Checks whether C++17 std::any is available.
#ifdef Y_ABSL_HAVE_STD_ANY
#error "Y_ABSL_HAVE_STD_ANY cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
+#elif defined(__cpp_lib_any)
+#define Y_ABSL_HAVE_STD_ANY 1
+#elif defined(Y_ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
!Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define Y_ABSL_HAVE_STD_ANY 1
#endif
-#endif
// Y_ABSL_HAVE_STD_OPTIONAL
//
// Checks whether C++17 std::optional is available.
#ifdef Y_ABSL_HAVE_STD_OPTIONAL
#error "Y_ABSL_HAVE_STD_OPTIONAL cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<optional>) && defined(__cplusplus) && \
- __cplusplus >= 201703L && !Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#elif defined(__cpp_lib_optional)
+#define Y_ABSL_HAVE_STD_OPTIONAL 1
+#elif defined(Y_ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+ !Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define Y_ABSL_HAVE_STD_OPTIONAL 1
-#endif
#endif
// Y_ABSL_HAVE_STD_VARIANT
@@ -609,13 +581,12 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// Checks whether C++17 std::variant is available.
#ifdef Y_ABSL_HAVE_STD_VARIANT
#error "Y_ABSL_HAVE_STD_VARIANT cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<variant>) && defined(__cplusplus) && \
- __cplusplus >= 201703L && !Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#elif defined(__cpp_lib_variant)
+#define Y_ABSL_HAVE_STD_VARIANT 1
+#elif defined(Y_ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+ !Y_ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define Y_ABSL_HAVE_STD_VARIANT 1
-#endif
#endif
// Y_ABSL_HAVE_STD_STRING_VIEW
@@ -623,23 +594,12 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// Checks whether C++17 std::string_view is available.
#ifdef Y_ABSL_HAVE_STD_STRING_VIEW
#error "Y_ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
-#endif
-
+#elif defined(__NVCC__)
#define Y_ABSL_HAVE_STD_STRING_VIEW 1
-
-// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
-// the support for <optional>, <any>, <string_view>, <variant>. So we use
-// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
-// <string_view>, <variant> is implemented) or higher. Also, `__cplusplus` is
-// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
-// version.
-// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
- ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
- (defined(__cplusplus) && __cplusplus > 201402))
-// #define Y_ABSL_HAVE_STD_ANY 1
-#define Y_ABSL_HAVE_STD_OPTIONAL 1
-#define Y_ABSL_HAVE_STD_VARIANT 1
+#elif defined(__cpp_lib_string_view)
+#define Y_ABSL_HAVE_STD_STRING_VIEW 1
+#elif defined(Y_ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define Y_ABSL_HAVE_STD_STRING_VIEW 1
#endif
@@ -811,6 +771,20 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define Y_ABSL_HAVE_HWADDRESS_SANITIZER 1
#endif
+// Y_ABSL_HAVE_DATAFLOW_SANITIZER
+//
+// Dataflow Sanitizer (or DFSAN) is a generalised dynamic data flow analysis.
+#ifdef Y_ABSL_HAVE_DATAFLOW_SANITIZER
+#error "Y_ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set."
+#elif defined(DATAFLOW_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// DataFlowSanitizer (-fsanitize=dataflow), so GCC users of -fsanitize=dataflow
+// should also use -DDATAFLOW_SANITIZER.
+#define Y_ABSL_HAVE_DATAFLOW_SANITIZER 1
+#elif Y_ABSL_HAVE_FEATURE(dataflow_sanitizer)
+#define Y_ABSL_HAVE_DATAFLOW_SANITIZER 1
+#endif
+
// Y_ABSL_HAVE_LEAK_SANITIZER
//
// LeakSanitizer (or lsan) is a detector of memory leaks.
@@ -825,7 +799,7 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#ifdef Y_ABSL_HAVE_LEAK_SANITIZER
#error "Y_ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
#elif defined(LEAK_SANITIZER)
-// GCC provides no method for detecting the presense of the standalone
+// GCC provides no method for detecting the presence of the standalone
// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also
// use -DLEAK_SANITIZER.
#define Y_ABSL_HAVE_LEAK_SANITIZER 1
@@ -873,7 +847,9 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// RTTI support.
#ifdef Y_ABSL_INTERNAL_HAS_RTTI
#error Y_ABSL_INTERNAL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
+#elif (defined(__GNUC__) && defined(__GXX_RTTI)) || \
+ (defined(_MSC_VER) && defined(_CPPRTTI)) || \
+ (!defined(__GNUC__) && !defined(_MSC_VER))
#define Y_ABSL_INTERNAL_HAS_RTTI 1
#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
@@ -884,7 +860,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error Y_ABSL_INTERNAL_HAVE_SSE cannot be directly set
#elif defined(__SSE__)
#define Y_ABSL_INTERNAL_HAVE_SSE 1
-#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \
+ !defined(_M_ARM64EC)
// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
// indicates that at least SSE was targeted with the /arch:SSE option.
// All x86-64 processors support SSE, so support can be assumed.
@@ -899,7 +876,8 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error Y_ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
#elif defined(__SSE2__)
#define Y_ABSL_INTERNAL_HAVE_SSE2 1
-#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \
+ !defined(_M_ARM64EC)
// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
// All x86-64 processors support SSE2, so support can be assumed.
@@ -946,4 +924,24 @@ static_assert(Y_ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define Y_ABSL_HAVE_CONSTANT_EVALUATED 1
#endif
+// Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
+// into an integer that can be compared against.
+#ifdef Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#error Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set
+#endif
+#ifdef __EMSCRIPTEN__
+#error #include <emscripten/version.h>
+#ifdef __EMSCRIPTEN_major__
+#if __EMSCRIPTEN_minor__ >= 1000
+#error __EMSCRIPTEN_minor__ is too big to fit in Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#if __EMSCRIPTEN_tiny__ >= 1000
+#error __EMSCRIPTEN_tiny__ is too big to fit in Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#define Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION \
+ ((__EMSCRIPTEN_major__)*1000000 + (__EMSCRIPTEN_minor__)*1000 + \
+ (__EMSCRIPTEN_tiny__))
+#endif
+#endif
+
#endif // Y_ABSL_BASE_CONFIG_H_
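
[editor note] After this change the Y_ABSL_HAVE_STD_ANY/OPTIONAL/VARIANT/STRING_VIEW macros are derived from the standard feature-test macros (__cpp_lib_any, __cpp_lib_optional, ...) rather than __has_include probes, and Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION packs major.minor.tiny as major*1000000 + minor*1000 + tiny (so 3.1.44 becomes 3001044). A minimal consumer-side sketch of how such a feature macro is typically used (the alias below is illustrative):

    #include "y_absl/base/config.h"

    #ifdef Y_ABSL_HAVE_STD_OPTIONAL
    #include <optional>
    template <typename T>
    using Maybe = std::optional<T>;
    #else
    #include "y_absl/types/optional.h"
    template <typename T>
    using Maybe = y_absl::optional<T>;
    #endif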
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h
index 1baab26165..1f486bc97e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/dynamic_annotations.h
@@ -46,6 +46,7 @@
#define Y_ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
#include <stddef.h>
+#include <stdint.h>
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
@@ -53,6 +54,10 @@
#include "y_absl/base/macros.h"
#endif
+#ifdef Y_ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
// TODO(rogeeff): Remove after the backward compatibility period.
#include "y_absl/base/internal/dynamic_annotations.h" // IWYU pragma: export
@@ -111,7 +116,7 @@
#if Y_ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
-// defined by the compiler-based santizer implementation, not by the Abseil
+// defined by the compiler-based sanitizer implementation, not by the Abseil
// library. Therefore they do not use Y_ABSL_INTERNAL_C_SYMBOL.
// -------------------------------------------------------------
@@ -457,6 +462,26 @@ Y_ABSL_NAMESPACE_END
#endif // Y_ABSL_HAVE_ADDRESS_SANITIZER
// -------------------------------------------------------------------------
+// HWAddress sanitizer annotations
+
+#ifdef __cplusplus
+namespace y_absl {
+#ifdef Y_ABSL_HAVE_HWADDRESS_SANITIZER
+// Under HWASAN changes the tag of the pointer.
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t tag) {
+ return reinterpret_cast<T*>(__hwasan_tag_pointer(ptr, tag));
+}
+#else
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t) {
+ return ptr;
+}
+#endif
+} // namespace y_absl
+#endif
+
+// -------------------------------------------------------------------------
// Undefine the macros intended only for this file.
#undef Y_ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
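
[editor note] HwasanTagPointer(), added above, stores `tag` in the pointer's tag bits via __hwasan_tag_pointer() in HWASAN builds and degrades to an identity function elsewhere, so call sites need no conditional compilation. A minimal sketch (the function below is illustrative):

    #include <cstdint>
    #include "y_absl/base/dynamic_annotations.h"

    char* TagForDebugging(char* buf) {
      // Under HWASAN the result carries tag 0x2A in its top byte;
      // in other builds it is simply `buf` again.
      return y_absl::HwasanTagPointer(buf, uintptr_t{0x2A});
    }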
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
index cbee25d1ab..0c459db700 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
@@ -72,7 +72,7 @@ namespace base_internal {
// Platform specific logic extracted from
// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
- off64_t offset) noexcept {
+ off_t offset) noexcept {
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
defined(__m68k__) || defined(__sh__) || \
(defined(__hppa__) && !defined(__LP64__)) || \
@@ -102,7 +102,7 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
#else
return reinterpret_cast<void*>(
syscall(SYS_mmap2, start, length, prot, flags, fd,
- static_cast<off_t>(offset / pagesize)));
+ static_cast<unsigned long>(offset / pagesize))); // NOLINT
#endif
#elif defined(__s390x__)
// On s390x, mmap() arguments are passed in memory.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
index e84a396360..420f88661a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
@@ -42,25 +42,25 @@
#include <windows.h>
#endif
+#ifdef __linux__
+#include <sys/prctl.h>
+#endif
+
#include <string.h>
+
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
-#include <new> // for placement-new
+#include <new> // for placement-new
#include "y_absl/base/dynamic_annotations.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/internal/spinlock.h"
-// MAP_ANONYMOUS
-#if defined(__APPLE__)
-// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
-// deprecated. In Darwin, MAP_ANON is all there is.
-#if !defined MAP_ANONYMOUS
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
-#endif // !MAP_ANONYMOUS
-#endif // __APPLE__
+#endif
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
@@ -122,7 +122,7 @@ static int IntLog2(size_t size, size_t base) {
static int Random(uint32_t *state) {
uint32_t r = *state;
int result = 1;
- while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+ while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
*state = r;
@@ -144,7 +144,7 @@ static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
- if (level > kMaxLevel-1) level = kMaxLevel - 1;
+ if (level > kMaxLevel - 1) level = kMaxLevel - 1;
Y_ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
@@ -153,8 +153,8 @@ static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
-static AllocList *LLA_SkiplistSearch(AllocList *head,
- AllocList *e, AllocList **prev) {
+static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
+ AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
@@ -190,7 +190,7 @@ static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
- head->levels--; // reduce head->levels if level unused
+ head->levels--; // reduce head->levels if level unused
}
}
@@ -249,9 +249,9 @@ void CreateGlobalArenas() {
// Returns a global arena that does not call into hooks. Used by NewArena()
// when kCallMallocHook is not set.
-LowLevelAlloc::Arena* UnhookedArena() {
+LowLevelAlloc::Arena *UnhookedArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+ return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}
#ifndef Y_ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
@@ -269,7 +269,7 @@ LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+ return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}
// magic numbers to identify allocated and unallocated blocks
@@ -356,8 +356,7 @@ LowLevelAlloc::Arena::Arena(uint32_t flags_value)
min_size(2 * round_up),
random(0) {
freelist.header.size = 0;
- freelist.header.magic =
- Magic(kMagicUnallocated, &freelist.header);
+ freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
freelist.header.arena = this;
freelist.levels = 0;
memset(freelist.next, 0, sizeof(freelist.next));
@@ -375,7 +374,7 @@ LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
meta_data_arena = UnhookedArena();
}
Arena *result =
- new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+ new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
return result;
}
@@ -480,8 +479,8 @@ static void Coalesce(AllocList *a) {
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
- a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
- &arena->random);
+ a->levels =
+ LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
@@ -489,27 +488,27 @@ static void Coalesce(AllocList *a) {
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
- AllocList *f = reinterpret_cast<AllocList *>(
- reinterpret_cast<char *>(v) - sizeof (f->header));
+ AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+ sizeof(f->header));
Y_ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
Y_ABSL_RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
- f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
- &arena->random);
+ f->levels =
+ LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
- Coalesce(f); // maybe coalesce with successor
- Coalesce(prev[0]); // maybe coalesce with predecessor
+ Coalesce(f); // maybe coalesce with successor
+ Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != nullptr) {
- AllocList *f = reinterpret_cast<AllocList *>(
- reinterpret_cast<char *>(v) - sizeof (f->header));
+ AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+ sizeof(f->header));
LowLevelAlloc::Arena *arena = f->header.arena;
ArenaLock section(arena);
AddToFreelist(v, arena);
@@ -524,21 +523,21 @@ void LowLevelAlloc::Free(void *v) {
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = nullptr;
if (request != 0) {
- AllocList *s; // will point to region that satisfies request
+ AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
// round up with header
- size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
- arena->round_up);
- for (;;) { // loop until we find a suitable region
+ size_t req_rnd =
+ RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
+ for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
- if (i < arena->freelist.levels) { // potential blocks exist
+ if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != nullptr &&
s->header.size < req_rnd) {
before = s;
}
- if (s != nullptr) { // we found a region
+ if (s != nullptr) { // we found a region
break;
}
}
@@ -550,7 +549,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages;
#ifdef _WIN32
- new_pages = VirtualAlloc(0, new_pages_size,
+ new_pages = VirtualAlloc(nullptr, new_pages_size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
Y_ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
@@ -570,6 +569,18 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
Y_ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
}
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+ // Attempt to name the allocated address range in /proc/$PID/smaps on
+ // Linux.
+ //
+ // This invocation of prctl() may fail if the Linux kernel was not
+ // configured with the CONFIG_ANON_VMA_NAME option. This is OK since
+ // the naming of arenas is primarily a debugging aid.
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
+ "y_absl");
+#endif
+#endif // __linux__
#endif // _WIN32
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
@@ -580,12 +591,12 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
- LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
+ LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
// big enough to split
- AllocList *n = reinterpret_cast<AllocList *>
- (req_rnd + reinterpret_cast<char *>(s));
+ AllocList *n =
+ reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
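
[editor note] The prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) call added in the hunk above labels the arena's anonymous mapping in /proc/$PID/smaps; it silently fails on kernels built without CONFIG_ANON_VMA_NAME, which is acceptable because the name is only a debugging aid. A standalone sketch of the same technique outside the allocator (the mapping below is illustrative):

    #include <cstddef>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    void* NamedAnonMapping(size_t size) {
      void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    #if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      if (p != MAP_FAILED) {
        // Best effort: ignored by kernels lacking CONFIG_ANON_VMA_NAME.
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "demo_arena");
      }
    #endif
      return p;
    }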
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h
index 82b0216036..0709c4afd4 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.h
@@ -46,7 +46,8 @@
// for more information.
#ifdef Y_ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
#error Y_ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
-#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \
+ defined(__hexagon__)
#define Y_ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h
deleted file mode 100644
index 0c9c358570..0000000000
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/prefetch.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2022 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef Y_ABSL_BASE_INTERNAL_PREFETCH_H_
-#define Y_ABSL_BASE_INTERNAL_PREFETCH_H_
-
-#include "y_absl/base/config.h"
-
-#ifdef __SSE__
-#include <xmmintrin.h>
-#endif
-
-#if defined(_MSC_VER) && defined(Y_ABSL_INTERNAL_HAVE_SSE)
-#include <intrin.h>
-#pragma intrinsic(_mm_prefetch)
-#endif
-
-// Compatibility wrappers around __builtin_prefetch, to prefetch data
-// for read if supported by the toolchain.
-
-// Move data into the cache before it is read, or "prefetch" it.
-//
-// The value of `addr` is the address of the memory to prefetch. If
-// the target and compiler support it, data prefetch instructions are
-// generated. If the prefetch is done some time before the memory is
-// read, it may be in the cache by the time the read occurs.
-//
-// The function names specify the temporal locality heuristic applied,
-// using the names of Intel prefetch instructions:
-//
-// T0 - high degree of temporal locality; data should be left in as
-//        many levels of the cache as possible
-// T1 - moderate degree of temporal locality
-// T2 - low degree of temporal locality
-// Nta - no temporal locality, data need not be left in the cache
-// after the read
-//
-// Incorrect or gratuitous use of these functions can degrade
-// performance, so use them only when representative benchmarks show
-// an improvement.
-//
-// Example usage:
-//
-// y_absl::base_internal::PrefetchT0(addr);
-//
-// Currently, the different prefetch calls behave on some Intel
-// architectures as follows:
-//
-// SNB..SKL SKX
-// PrefetchT0() L1/L2/L3 L1/L2
-// PrefetchT1() L2/L3 L2
-// PrefetchT2() L2/L3 L2
-// PrefetchNta() L1/--/L3 L1*
-//
-// * On SKX PrefetchNta() will bring the line into L1 but will evict
-// from L3 cache. This might result in surprising behavior.
-//
-// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon.
-//
-namespace y_absl {
-Y_ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-void PrefetchT0(const void* addr);
-void PrefetchT1(const void* addr);
-void PrefetchT2(const void* addr);
-void PrefetchNta(const void* addr);
-
-// Implementation details follow.
-
-#if Y_ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
-
-#define Y_ABSL_INTERNAL_HAVE_PREFETCH 1
-
-// See __builtin_prefetch:
-// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
-//
-// These functions speculatively load for read only. This is
-// safe for all currently supported platforms. However, prefetch for
-// store may have problems depending on the target platform.
-//
-inline void PrefetchT0(const void* addr) {
- // Note: this uses prefetcht0 on Intel.
- __builtin_prefetch(addr, 0, 3);
-}
-inline void PrefetchT1(const void* addr) {
- // Note: this uses prefetcht1 on Intel.
- __builtin_prefetch(addr, 0, 2);
-}
-inline void PrefetchT2(const void* addr) {
- // Note: this uses prefetcht2 on Intel.
- __builtin_prefetch(addr, 0, 1);
-}
-inline void PrefetchNta(const void* addr) {
-  // Note: this uses prefetchnta on Intel.
- __builtin_prefetch(addr, 0, 0);
-}
-
-#elif defined(Y_ABSL_INTERNAL_HAVE_SSE)
-
-#define Y_ABSL_INTERNAL_HAVE_PREFETCH 1
-
-inline void PrefetchT0(const void* addr) {
- _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
-}
-inline void PrefetchT1(const void* addr) {
- _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
-}
-inline void PrefetchT2(const void* addr) {
- _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
-}
-inline void PrefetchNta(const void* addr) {
- _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
-}
-
-#else
-inline void PrefetchT0(const void*) {}
-inline void PrefetchT1(const void*) {}
-inline void PrefetchT2(const void*) {}
-inline void PrefetchNta(const void*) {}
-#endif
-
-} // namespace base_internal
-Y_ABSL_NAMESPACE_END
-} // namespace y_absl
-
-#endif // Y_ABSL_BASE_INTERNAL_PREFETCH_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
index b480eca06d..a820cd1715 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
@@ -21,6 +21,10 @@
#include <cstring>
#include <util/generic/string.h>
+#ifdef __EMSCRIPTEN__
+#error #include <emscripten/console.h>
+#endif
+
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/atomic_hook.h"
@@ -173,7 +177,7 @@ void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- AsyncSignalSafeWriteToStderr(buffer, strlen(buffer));
+ AsyncSignalSafeWriteError(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
@@ -201,9 +205,34 @@ void DefaultInternalLog(y_absl::LogSeverity severity, const char* file, int line
} // namespace
-void AsyncSignalSafeWriteToStderr(const char* s, size_t len) {
+void AsyncSignalSafeWriteError(const char* s, size_t len) {
+ if (!len) return;
y_absl::base_internal::ErrnoSaver errno_saver;
-#if defined(Y_ABSL_HAVE_SYSCALL_WRITE)
+#if defined(__EMSCRIPTEN__)
+ // In WebAssembly, bypass filesystem emulation via fwrite.
+ if (s[len - 1] == '\n') {
+ // Skip a trailing newline character as emscripten_errn adds one itself.
+ len--;
+ }
+ // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+ // until 3.1.43.
+#if Y_ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+ emscripten_errn(s, len);
+#else
+ char buf[kLogBufSize];
+ if (len >= kLogBufSize) {
+ len = kLogBufSize - 1;
+ constexpr size_t trunc_len = sizeof(kTruncated) - 2;
+ memcpy(buf + len - trunc_len, kTruncated, trunc_len);
+ buf[len] = '\0';
+ len -= trunc_len;
+ } else {
+ buf[len] = '\0';
+ }
+ memcpy(buf, s, len);
+ _emscripten_err(buf);
+#endif
+#elif defined(Y_ABSL_HAVE_SYSCALL_WRITE)
// We prefer calling write via `syscall` to minimize the risk of libc doing
// something "helpful".
syscall(SYS_write, STDERR_FILENO, s, len);
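The pre-3.1.43 Emscripten fallback above bounds the message and stamps a truncation marker over its tail. A self-contained sketch of that copy-and-truncate step, with hypothetical stand-ins (kBufSize, kMarker) for the kLogBufSize and kTruncated constants used in raw_logging.cc:

#include <cstddef>
#include <cstring>

constexpr size_t kBufSize = 32;      // stand-in for kLogBufSize
constexpr char kMarker[] = "...\n";  // stand-in for kTruncated

void BoundedCopy(const char* s, size_t len, char (&buf)[kBufSize]) {
  if (len >= kBufSize) {
    len = kBufSize - 1;
    // Drop the marker's trailing '\n' and '\0', as sizeof(kTruncated) - 2
    // does above, then overwrite the tail of the buffer with the marker.
    constexpr size_t trunc_len = sizeof(kMarker) - 2;
    std::memcpy(buf + len - trunc_len, kMarker, trunc_len);
    buf[len] = '\0';
    len -= trunc_len;
  } else {
    buf[len] = '\0';
  }
  std::memcpy(buf, s, len);  // copy only the part that fits before the marker
}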
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
index 0a10d71630..c44bb9dfcc 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
@@ -48,6 +48,7 @@
::y_absl::raw_log_internal::RawLog(Y_ABSL_RAW_LOG_INTERNAL_##severity, \
absl_raw_log_internal_basename, __LINE__, \
__VA_ARGS__); \
+ Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \
} while (0)
// Similar to CHECK(condition) << message, but for low-level modules:
@@ -77,8 +78,7 @@
::y_absl::raw_log_internal::internal_log_function( \
Y_ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, \
__LINE__, message); \
- if (Y_ABSL_RAW_LOG_INTERNAL_##severity == ::y_absl::LogSeverity::kFatal) \
- Y_ABSL_UNREACHABLE(); \
+ Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \
} while (0)
#define Y_ABSL_INTERNAL_CHECK(condition, message) \
@@ -90,6 +90,20 @@
} \
} while (0)
+#ifndef NDEBUG
+
+#define Y_ABSL_RAW_DLOG(severity, ...) Y_ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define Y_ABSL_RAW_DCHECK(condition, message) Y_ABSL_RAW_CHECK(condition, message)
+
+#else // NDEBUG
+
+#define Y_ABSL_RAW_DLOG(severity, ...) \
+ while (false) Y_ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define Y_ABSL_RAW_DCHECK(condition, message) \
+ while (false) Y_ABSL_RAW_CHECK(condition, message)
+
+#endif // NDEBUG
+
#define Y_ABSL_RAW_LOG_INTERNAL_INFO ::y_absl::LogSeverity::kInfo
#define Y_ABSL_RAW_LOG_INTERNAL_WARNING ::y_absl::LogSeverity::kWarning
#define Y_ABSL_RAW_LOG_INTERNAL_ERROR ::y_absl::LogSeverity::kError
@@ -97,6 +111,12 @@
#define Y_ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \
::y_absl::NormalizeLogSeverity(severity)
+#define Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_INFO
+#define Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING
+#define Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR
+#define Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL Y_ABSL_UNREACHABLE()
+#define Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity)
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace raw_log_internal {
@@ -109,8 +129,8 @@ void RawLog(y_absl::LogSeverity severity, const char* file, int line,
const char* format, ...) Y_ABSL_PRINTF_ATTRIBUTE(4, 5);
// Writes the provided buffer directly to stderr, in a signal-safe, low-level
-// manner.
-void AsyncSignalSafeWriteToStderr(const char* s, size_t len);
+// manner. Preserves errno.
+void AsyncSignalSafeWriteError(const char* s, size_t len);
// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
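The severity-keyed macros above work by token pasting: Y_ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity expands to nothing for non-fatal severities and to Y_ABSL_UNREACHABLE() for FATAL, so the compiler may treat everything after a FATAL log as dead code. A simplified, self-contained sketch of the same mechanism (the SKETCH_* names and Log() are invented for illustration; __builtin_unreachable() is the GCC/Clang builtin that typically backs such macros):

void Log(const char* msg);  // hypothetical sink; the real RawLog aborts on FATAL

#define SKETCH_MAYBE_UNREACHABLE_INFO
#define SKETCH_MAYBE_UNREACHABLE_FATAL __builtin_unreachable()

#define SKETCH_RAW_LOG(severity, msg)    \
  do {                                   \
    Log(msg);                            \
    SKETCH_MAYBE_UNREACHABLE_##severity; \
  } while (0)

int Divide(int a, int b) {
  if (b == 0) SKETCH_RAW_LOG(FATAL, "divide by zero");
  // With FATAL pasted above, the compiler knows control never reaches here
  // when b == 0, so no diagnostics fire for the division below.
  return a / b;
}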
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
index 0172cb3311..3c2169ac94 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
@@ -41,6 +41,7 @@
#include <string.h>
#include <cassert>
+#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
@@ -159,7 +160,7 @@ static double GetNominalCPUFrequency() {
DWORD type = 0;
DWORD data = 0;
DWORD data_size = sizeof(data);
- auto result = RegQueryValueExA(key, "~MHz", 0, &type,
+ auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
reinterpret_cast<LPBYTE>(&data), &data_size);
RegCloseKey(key);
if (result == ERROR_SUCCESS && type == REG_DWORD &&
@@ -189,7 +190,13 @@ static double GetNominalCPUFrequency() {
// and the memory location pointed to by value is set to the value read.
static bool ReadLongFromFile(const char *file, long *value) {
bool ret = false;
- int fd = open(file, O_RDONLY | O_CLOEXEC);
+#if defined(_POSIX_C_SOURCE)
+ const int file_mode = (O_RDONLY | O_CLOEXEC);
+#else
+ const int file_mode = O_RDONLY;
+#endif
+
+ int fd = open(file, file_mode);
if (fd != -1) {
char line[1024];
char *err;
@@ -225,8 +232,8 @@ static int64_t ReadMonotonicClockNanos() {
int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
if (rc != 0) {
- perror("clock_gettime() failed");
- abort();
+ Y_ABSL_INTERNAL_LOG(
+ FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
}
return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}
@@ -414,82 +421,33 @@ pid_t GetTID() {
return tid;
}
-#else
+#elif defined(__APPLE__)
-// Fallback implementation of GetTID using pthread_getspecific.
-Y_ABSL_CONST_INIT static once_flag tid_once;
-Y_ABSL_CONST_INIT static pthread_key_t tid_key;
-Y_ABSL_CONST_INIT static y_absl::base_internal::SpinLock tid_lock(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// We set a bit per thread in this array to indicate that an ID is in
-// use. ID 0 is unused because it is the default value returned by
-// pthread_getspecific().
-Y_ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
- Y_ABSL_GUARDED_BY(tid_lock) = nullptr;
-static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
-
-// Returns the TID to tid_array.
-static void FreeTID(void *v) {
- intptr_t tid = reinterpret_cast<intptr_t>(v);
- intptr_t word = tid / kBitsPerWord;
- uint32_t mask = ~(1u << (tid % kBitsPerWord));
- y_absl::base_internal::SpinLockHolder lock(&tid_lock);
- assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
- (*tid_array)[static_cast<size_t>(word)] &= mask;
+pid_t GetTID() {
+ uint64_t tid;
+ // `nullptr` here implies this thread. This only fails if the specified
+ // thread is invalid or the pointer-to-tid is null, so we needn't worry about
+ // it.
+ pthread_threadid_np(nullptr, &tid);
+ return static_cast<pid_t>(tid);
}
-static void InitGetTID() {
- if (pthread_key_create(&tid_key, FreeTID) != 0) {
- // The logging system calls GetTID() so it can't be used here.
- perror("pthread_key_create failed");
- abort();
- }
-
- // Initialize tid_array.
- y_absl::base_internal::SpinLockHolder lock(&tid_lock);
- tid_array = new std::vector<uint32_t>(1);
- (*tid_array)[0] = 1; // ID 0 is never-allocated.
-}
+#elif defined(__native_client__)
-// Return a per-thread small integer ID from pthread's thread-specific data.
pid_t GetTID() {
- y_absl::call_once(tid_once, InitGetTID);
-
- intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
- if (tid != 0) {
- return static_cast<pid_t>(tid);
- }
-
- int bit; // tid_array[word] = 1u << bit;
- size_t word;
- {
- // Search for the first unused ID.
- y_absl::base_internal::SpinLockHolder lock(&tid_lock);
- // First search for a word in the array that is not all ones.
- word = 0;
- while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
- ++word;
- }
- if (word == tid_array->size()) {
- tid_array->push_back(0); // No space left, add kBitsPerWord more IDs.
- }
- // Search for a zero bit in the word.
- bit = 0;
- while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
- ++bit;
- }
- tid =
- static_cast<intptr_t>((word * kBitsPerWord) + static_cast<size_t>(bit));
- (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
- }
+ auto* thread = pthread_self();
+ static_assert(sizeof(pid_t) == sizeof(thread),
+                "In NaCl, pid_t is expected to be the same size as a pointer");
+ return reinterpret_cast<pid_t>(thread);
+}
- if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
- perror("pthread_setspecific failed");
- abort();
- }
+#else
- return static_cast<pid_t>(tid);
+// Fallback implementation of `GetTID` using `pthread_self`.
+pid_t GetTID() {
+ // `pthread_t` need not be arithmetic per POSIX; platforms where it isn't
+ // should be handled above.
+ return static_cast<pid_t>(pthread_self());
}
#endif
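For Apple targets, the branch above relies on pthread_threadid_np(), which fills in a 64-bit id for the queried thread (nullptr means the calling thread). A minimal usage sketch, compilable only on Apple platforms:

#include <pthread.h>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t tid = 0;
  // Apple-specific API; passing nullptr queries the current thread,
  // matching the __APPLE__ implementation of GetTID() above.
  pthread_threadid_np(nullptr, &tid);
  std::printf("tid=%llu\n", static_cast<unsigned long long>(tid));
  return 0;
}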
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
index 7f5c39a7d1..c834f4879a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
@@ -58,18 +58,19 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// that protected visibility is unsupported.
Y_ABSL_CONST_INIT // Must come before __attribute__((visibility("protected")))
#if Y_ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
-__attribute__((visibility("protected")))
+ __attribute__((visibility("protected")))
#endif // Y_ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if Y_ABSL_PER_THREAD_TLS
-// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
-Y_ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+ // Prefer __thread to thread_local as benchmarks indicate it is a bit
+ // faster.
+ Y_ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
#elif defined(Y_ABSL_HAVE_THREAD_LOCAL)
-thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+ thread_local ThreadIdentity* thread_identity_ptr = nullptr;
#endif // Y_ABSL_PER_THREAD_TLS
#endif // TLS or CPP11
-void SetCurrentThreadIdentity(
- ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+ ThreadIdentityReclaimerFunction reclaimer) {
assert(CurrentThreadIdentityIfPresent() == nullptr);
// Associate our destructor.
// NOTE: This call to pthread_setspecific is currently the only immovable
@@ -79,7 +80,7 @@ void SetCurrentThreadIdentity(
y_absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
reclaimer);
-#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
+#if defined(__EMSCRIPTEN__) || defined(__MINGW32__) || defined(__hexagon__)
  // Emscripten and MinGW pthread implementations do not support signals.
// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
// for more information.
@@ -134,7 +135,7 @@ void ClearCurrentThreadIdentity() {
Y_ABSL_THREAD_IDENTITY_MODE == Y_ABSL_THREAD_IDENTITY_MODE_USE_CPP11
thread_identity_ptr = nullptr;
#elif Y_ABSL_THREAD_IDENTITY_MODE == \
- Y_ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ Y_ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
// pthread_setspecific expected to clear value on destruction
assert(CurrentThreadIdentityIfPresent() == nullptr);
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
index 617a220631..c42236b8f3 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
@@ -62,8 +62,8 @@ struct PerThreadSynch {
return reinterpret_cast<ThreadIdentity*>(this);
}
- PerThreadSynch *next; // Circular waiter queue; initialized to 0.
- PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
+ PerThreadSynch* next; // Circular waiter queue; initialized to 0.
+ PerThreadSynch* skip; // If non-zero, all entries in Mutex queue
// up to and including "skip" have same
// condition as this, and will be woken later
bool may_skip; // if false while on mutex queue, a mutex unlocker
@@ -104,10 +104,7 @@ struct PerThreadSynch {
//
// Transitions from kAvailable to kQueued require no barrier, they
// are externally ordered by the Mutex.
- enum State {
- kAvailable,
- kQueued
- };
+ enum State { kAvailable, kQueued };
std::atomic<State> state;
// The wait parameters of the current wait. waitp is null if the
@@ -122,14 +119,14 @@ struct PerThreadSynch {
// pointer unchanged.
SynchWaitParams* waitp;
- intptr_t readers; // Number of readers in mutex.
+ intptr_t readers; // Number of readers in mutex.
// When priority will next be read (cycles).
int64_t next_priority_read_cycles;
// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
- SynchLocksHeld *all_locks;
+ SynchLocksHeld* all_locks;
};
// The instances of this class are allocated in NewThreadIdentity() with an
@@ -147,7 +144,7 @@ struct ThreadIdentity {
// Private: Reserved for y_absl::synchronization_internal::Waiter.
struct WaiterState {
- alignas(void*) char data[128];
+ alignas(void*) char data[256];
} waiter_state;
// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -170,7 +167,10 @@ struct ThreadIdentity {
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
-// is handled internally within tcmalloc's initialization already.
+// is handled internally within tcmalloc's initialization already. Note that
+// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
+// on Apple platforms. Whatever function calls your MallocHooks will need to
+// watch for recursion there.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
@@ -217,7 +217,7 @@ void ClearCurrentThreadIdentity();
#define Y_ABSL_THREAD_IDENTITY_MODE Y_ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif defined(__APPLE__) && defined(Y_ABSL_HAVE_THREAD_LOCAL)
#define Y_ABSL_THREAD_IDENTITY_MODE Y_ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif Y_ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+#elif Y_ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
index e938d7dcc2..85707a50b2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
@@ -26,22 +26,13 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace base_internal {
-// NOTE: The various STL exception throwing functions are placed within the
-// #ifdef blocks so the symbols aren't exposed on platforms that don't support
-// them, such as the Android NDK. For example, ANGLE fails to link when building
-// within AOSP without them, since the STL functions don't exist.
-namespace {
-#ifdef Y_ABSL_HAVE_EXCEPTIONS
-template <typename T>
-[[noreturn]] void Throw(const T& error) {
- throw error;
-}
-#endif
-} // namespace
+// NOTE: The exception types, like `std::logic_error`, do not exist on all
+// platforms. (For example, the Android NDK does not have them.)
+// Therefore, their use must be guarded by `#ifdef` or equivalent.
void ThrowStdLogicError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::logic_error(what_arg));
+ throw std::logic_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -49,7 +40,7 @@ void ThrowStdLogicError(const TString& what_arg) {
}
void ThrowStdLogicError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::logic_error(what_arg));
+ throw std::logic_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -57,7 +48,7 @@ void ThrowStdLogicError(const char* what_arg) {
}
void ThrowStdInvalidArgument(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::invalid_argument(what_arg));
+ throw std::invalid_argument(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -65,7 +56,7 @@ void ThrowStdInvalidArgument(const TString& what_arg) {
}
void ThrowStdInvalidArgument(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::invalid_argument(what_arg));
+ throw std::invalid_argument(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -74,7 +65,7 @@ void ThrowStdInvalidArgument(const char* what_arg) {
void ThrowStdDomainError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::domain_error(what_arg));
+ throw std::domain_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -82,7 +73,7 @@ void ThrowStdDomainError(const TString& what_arg) {
}
void ThrowStdDomainError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::domain_error(what_arg));
+ throw std::domain_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -91,7 +82,7 @@ void ThrowStdDomainError(const char* what_arg) {
void ThrowStdLengthError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::length_error(what_arg));
+ throw std::length_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -99,7 +90,7 @@ void ThrowStdLengthError(const TString& what_arg) {
}
void ThrowStdLengthError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::length_error(what_arg));
+ throw std::length_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -108,7 +99,7 @@ void ThrowStdLengthError(const char* what_arg) {
void ThrowStdOutOfRange(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::out_of_range(what_arg));
+ throw std::out_of_range(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -116,7 +107,7 @@ void ThrowStdOutOfRange(const TString& what_arg) {
}
void ThrowStdOutOfRange(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::out_of_range(what_arg));
+ throw std::out_of_range(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -125,7 +116,7 @@ void ThrowStdOutOfRange(const char* what_arg) {
void ThrowStdRuntimeError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::runtime_error(what_arg));
+ throw std::runtime_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -133,7 +124,7 @@ void ThrowStdRuntimeError(const TString& what_arg) {
}
void ThrowStdRuntimeError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::runtime_error(what_arg));
+ throw std::runtime_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -142,7 +133,7 @@ void ThrowStdRuntimeError(const char* what_arg) {
void ThrowStdRangeError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::range_error(what_arg));
+ throw std::range_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -150,7 +141,7 @@ void ThrowStdRangeError(const TString& what_arg) {
}
void ThrowStdRangeError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::range_error(what_arg));
+ throw std::range_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -159,7 +150,7 @@ void ThrowStdRangeError(const char* what_arg) {
void ThrowStdOverflowError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::overflow_error(what_arg));
+ throw std::overflow_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -167,7 +158,7 @@ void ThrowStdOverflowError(const TString& what_arg) {
}
void ThrowStdOverflowError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::overflow_error(what_arg));
+ throw std::overflow_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -176,7 +167,7 @@ void ThrowStdOverflowError(const char* what_arg) {
void ThrowStdUnderflowError(const TString& what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::underflow_error(what_arg));
+ throw std::underflow_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -184,7 +175,7 @@ void ThrowStdUnderflowError(const TString& what_arg) {
}
void ThrowStdUnderflowError(const char* what_arg) {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::underflow_error(what_arg));
+ throw std::underflow_error(what_arg);
#else
Y_ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -193,7 +184,7 @@ void ThrowStdUnderflowError(const char* what_arg) {
void ThrowStdBadFunctionCall() {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_function_call());
+ throw std::bad_function_call();
#else
std::abort();
#endif
@@ -201,7 +192,7 @@ void ThrowStdBadFunctionCall() {
void ThrowStdBadAlloc() {
#ifdef Y_ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_alloc());
+ throw std::bad_alloc();
#else
std::abort();
#endif
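These delegates exist so that headers can report errors without spelling `throw` themselves, keeping callers compilable both with and without exception support. A hedged caller-side sketch (the declaration mirrors the one in throw_delegate.h; At() is an invented example):

#include <cstddef>

namespace base_internal {
// Throws std::out_of_range when exceptions are enabled; otherwise logs
// fatally and aborts (see the definitions above).
[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
}  // namespace base_internal

// A bounds-checked accessor in the style of FixedArray::at(): the header
// never uses `throw` directly, so it builds cleanly under -fno-exceptions.
int At(const int* data, size_t size, size_t i) {
  if (i >= size) base_internal::ThrowStdOutOfRange("At: index out of range");
  return data[i];
}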
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
index 4f959fadef..afd1a8b110 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
@@ -71,13 +71,12 @@ int64_t UnscaledCycleClock::Now() {
#else
int32_t tbu, tbl, tmp;
asm volatile(
- "0:\n"
"mftbu %[hi32]\n"
"mftb %[lo32]\n"
"mftbu %[tmp]\n"
"cmpw %[tmp],%[hi32]\n"
- "bne 0b\n"
- : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp));
+ "bne $-16\n" // Retry on failure.
+ : [hi32] "=r"(tbu), [lo32] "=r"(tbl), [tmp] "=r"(tmp));
return (static_cast<int64_t>(tbu) << 32) | tbl;
#endif
#endif
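The PowerPC sequence above samples the 64-bit timebase via two 32-bit reads and then re-reads the upper half, branching back ("bne $-16") if a carry out of the low word changed it mid-sample. The same retry idiom, written generically in portable C++ for illustration:

#include <atomic>
#include <cstdint>

// Reads a 64-bit counter published as two 32-bit atomic halves. Retrying
// until the high half is stable across the pair of loads guarantees a
// consistent sample, exactly like the mftbu/mftb/mftbu loop above.
uint64_t ReadSplitCounter(const std::atomic<uint32_t>& hi,
                          const std::atomic<uint32_t>& lo) {
  uint32_t h, l, h2;
  do {
    h = hi.load(std::memory_order_acquire);
    l = lo.load(std::memory_order_acquire);
    h2 = hi.load(std::memory_order_acquire);
  } while (h != h2);  // the low word wrapped between reads; try again
  return (static_cast<uint64_t>(h) << 32) | l;
}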
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
index 98a42d1c67..c8805bce3c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/options.h
@@ -200,7 +200,7 @@
// allowed.
#define Y_ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define Y_ABSL_OPTION_INLINE_NAMESPACE_NAME lts_y_20230125
+#define Y_ABSL_OPTION_INLINE_NAMESPACE_NAME lts_y_20230802
// Y_ABSL_OPTION_HARDENED
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h
index 04f00a81bf..c55b659116 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/policy_checks.h
@@ -44,10 +44,10 @@
// Toolchain Check
// -----------------------------------------------------------------------------
-// We support Visual Studio 2017 (MSVC++ 15.0) and later.
+// We support Visual Studio 2019 (MSVC++ 16.0) and later.
// This minimum will go up.
-#if defined(_MSC_VER) && _MSC_VER < 1910 && !defined(__clang__)
-#error "This package requires Visual Studio 2017 (MSVC++ 15.0) or higher."
+#if defined(_MSC_VER) && _MSC_VER < 1920 && !defined(__clang__)
+#error "This package requires Visual Studio 2019 (MSVC++ 16.0) or higher."
#endif
// We support GCC 7 and later.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/prefetch.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/prefetch.h
new file mode 100644
index 0000000000..1d30f01547
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/prefetch.h
@@ -0,0 +1,198 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: prefetch.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines prefetch functions to prefetch memory contents
+// into the first level cache (L1) for the current CPU. The prefetch logic
+// offered in this header is limited to prefetching first level cachelines
+// only, and is aimed at relatively 'simple' prefetching logic.
+//
+#ifndef Y_ABSL_BASE_PREFETCH_H_
+#define Y_ABSL_BASE_PREFETCH_H_
+
+#include "y_absl/base/config.h"
+
+#if defined(Y_ABSL_INTERNAL_HAVE_SSE)
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900 && \
+ (defined(_M_X64) || defined(_M_IX86))
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+
+// Moves data into the L1 cache before it is read, or "prefetches" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// This method prefetches data with the highest degree of temporal locality;
+// data is prefetched where possible into all levels of the cache.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+// // Computes incremental checksum for `data`.
+// int ComputeChecksum(int sum, y_absl::string_view data);
+//
+//  // Computes cumulative checksum for all values in `data`.
+// int ComputeChecksum(y_absl::Span<const TString> data) {
+// int sum = 0;
+// auto it = data.begin();
+// auto pit = data.begin();
+// auto end = data.end();
+// for (int dist = 8; dist > 0 && pit != data.end(); --dist, ++pit) {
+// y_absl::PrefetchToLocalCache(pit->data());
+// }
+// for (; pit != end; ++pit, ++it) {
+// sum = ComputeChecksum(sum, *it);
+// y_absl::PrefetchToLocalCache(pit->data());
+// }
+// for (; it != end; ++it) {
+// sum = ComputeChecksum(sum, *it);
+// }
+// return sum;
+// }
+//
+void PrefetchToLocalCache(const void* addr);
+
+// Moves data into the L1 cache before it is read, or "prefetches" it.
+//
+// This function is identical to `PrefetchToLocalCache()` except that it has
+// non-temporal locality: the fetched data should not be left in any of the
+// cache tiers. This is useful for cases where the data is used only once /
+// short term, for example, invoking a destructor on an object.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+// template <typename Iterator>
+// void DestroyPointers(Iterator begin, Iterator end) {
+//     int dist = 8;
+//     auto prefetch_it = begin;
+//     while (prefetch_it != end && dist-- > 0) {
+// y_absl::PrefetchToLocalCacheNta(*prefetch_it++);
+// }
+// while (prefetch_it != end) {
+// delete *begin++;
+// y_absl::PrefetchToLocalCacheNta(*prefetch_it++);
+// }
+// while (begin != end) {
+// delete *begin++;
+// }
+// }
+//
+void PrefetchToLocalCacheNta(const void* addr);
+
+// Moves data into the L1 cache with the intent to modify it.
+//
+// This function is similar to `PrefetchToLocalCache()` except that it
+// prefetches cachelines with an 'intent to modify'. This typically includes
+// invalidating cache entries for this address in all other cache tiers, and an
+// exclusive access intent.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Because this function can invalidate cachelines held by other caches and
+// other cores, incorrect usage can have an even greater negative impact than
+// an incorrect regular prefetch.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+// void* Arena::Allocate(size_t size) {
+// void* ptr = AllocateBlock(size);
+//     y_absl::PrefetchToLocalCacheForWrite(ptr);
+// return ptr;
+// }
+//
+void PrefetchToLocalCacheForWrite(const void* addr);
+
+#if Y_ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define Y_ABSL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+inline void PrefetchToLocalCache(const void* addr) {
+ __builtin_prefetch(addr, 0, 3);
+}
+
+inline void PrefetchToLocalCacheNta(const void* addr) {
+ __builtin_prefetch(addr, 0, 0);
+}
+
+inline void PrefetchToLocalCacheForWrite(const void* addr) {
+ // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1)
+ // unless -march=broadwell or newer; this is not generally the default, so we
+ // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel
+ // processors and has been present on AMD processors since the K6-2.
+#if defined(__x86_64__)
+ asm("prefetchw (%0)" : : "r"(addr));
+#else
+ __builtin_prefetch(addr, 1, 3);
+#endif
+}
+
+#elif defined(Y_ABSL_INTERNAL_HAVE_SSE)
+
+#define Y_ABSL_HAVE_PREFETCH 1
+
+inline void PrefetchToLocalCache(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
+}
+
+inline void PrefetchToLocalCacheNta(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
+}
+
+inline void PrefetchToLocalCacheForWrite(const void* addr) {
+#if defined(_MM_HINT_ET0)
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_ET0);
+#elif !defined(_MSC_VER) && defined(__x86_64__)
+ // _MM_HINT_ET0 is not universally supported. As we commented further
+ // up, PREFETCHW is recognized as a no-op on older Intel processors
+  // and has been present on AMD processors since the K6-2. We disable this
+  // for MSVC because older MSVC versions miscompile it.
+ asm("prefetchw (%0)" : : "r"(addr));
+#endif
+}
+
+#else
+
+inline void PrefetchToLocalCache(const void* addr) {}
+inline void PrefetchToLocalCacheNta(const void* addr) {}
+inline void PrefetchToLocalCacheForWrite(const void* addr) {}
+
+#endif
+
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_BASE_PREFETCH_H_
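Judging by the builtins each wrapper emits, the deleted internal API maps onto the new public one as PrefetchT0 -> PrefetchToLocalCache and PrefetchNta -> PrefetchToLocalCacheNta (each pair expands to the same __builtin_prefetch call); PrefetchT1/T2 have no direct replacement. A small usage sketch of the write-intent variant (the forward declaration stands in for including the new header; the 64-byte stride is an assumed cacheline size):

#include <cstddef>

namespace y_absl {
void PrefetchToLocalCacheForWrite(const void* addr);  // declared above
}

// Zeroes a buffer while prefetching the next cacheline with write intent,
// so the exclusive-ownership request overlaps the current line's stores.
void ZeroAhead(char* data, size_t n) {
  constexpr size_t kLine = 64;  // assumed cacheline size, illustration only
  for (size_t i = 0; i < n; i += kLine) {
    if (i + kLine < n) y_absl::PrefetchToLocalCacheForWrite(data + i + kLine);
    for (size_t j = i; j < i + kLine && j < n; ++j) data[j] = 0;
  }
}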
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
index 83c817448b..fe32c20c8e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/fixed_array.h
@@ -117,14 +117,20 @@ class FixedArray {
(N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
: static_cast<size_type>(N));
- FixedArray(
- const FixedArray& other,
- const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
+ FixedArray(const FixedArray& other) noexcept(NoexceptCopyable())
+ : FixedArray(other,
+ AllocatorTraits::select_on_container_copy_construction(
+ other.storage_.alloc())) {}
+
+ FixedArray(const FixedArray& other,
+ const allocator_type& a) noexcept(NoexceptCopyable())
: FixedArray(other.begin(), other.end(), a) {}
- FixedArray(
- FixedArray&& other,
- const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
+ FixedArray(FixedArray&& other) noexcept(NoexceptMovable())
+ : FixedArray(std::move(other), other.storage_.alloc()) {}
+
+ FixedArray(FixedArray&& other,
+ const allocator_type& a) noexcept(NoexceptMovable())
: FixedArray(std::make_move_iterator(other.begin()),
std::make_move_iterator(other.end()), a) {}
@@ -200,18 +206,22 @@ class FixedArray {
//
// Returns a const T* pointer to elements of the `FixedArray`. This pointer
// can be used to access (but not modify) the contained elements.
- const_pointer data() const { return AsValueType(storage_.begin()); }
+ const_pointer data() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return AsValueType(storage_.begin());
+ }
// Overload of FixedArray::data() to return a T* pointer to elements of the
// fixed array. This pointer can be used to access and modify the contained
// elements.
- pointer data() { return AsValueType(storage_.begin()); }
+ pointer data() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return AsValueType(storage_.begin());
+ }
// FixedArray::operator[]
//
  // Returns a reference to the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
- reference operator[](size_type i) {
+ reference operator[](size_type i) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -219,7 +229,7 @@ class FixedArray {
  // Overload of FixedArray::operator[]() to return a const reference to the
// ith element of the fixed array.
// REQUIRES: 0 <= i < size()
- const_reference operator[](size_type i) const {
+ const_reference operator[](size_type i) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -228,7 +238,7 @@ class FixedArray {
//
// Bounds-checked access. Returns a reference to the ith element of the fixed
// array, or throws std::out_of_range
- reference at(size_type i) {
+ reference at(size_type i) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (Y_ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
@@ -237,7 +247,7 @@ class FixedArray {
// Overload of FixedArray::at() to return a const reference to the ith element
// of the fixed array.
- const_reference at(size_type i) const {
+ const_reference at(size_type i) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (Y_ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
@@ -247,14 +257,14 @@ class FixedArray {
// FixedArray::front()
//
// Returns a reference to the first element of the fixed array.
- reference front() {
+ reference front() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
// Overload of FixedArray::front() to return a reference to the first element
// of a fixed array of const values.
- const_reference front() const {
+ const_reference front() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
@@ -262,14 +272,14 @@ class FixedArray {
// FixedArray::back()
//
// Returns a reference to the last element of the fixed array.
- reference back() {
+ reference back() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
// Overload of FixedArray::back() to return a reference to the last element
// of a fixed array of const values.
- const_reference back() const {
+ const_reference back() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
@@ -277,62 +287,74 @@ class FixedArray {
// FixedArray::begin()
//
// Returns an iterator to the beginning of the fixed array.
- iterator begin() { return data(); }
+ iterator begin() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
// Overload of FixedArray::begin() to return a const iterator to the
// beginning of the fixed array.
- const_iterator begin() const { return data(); }
+ const_iterator begin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
// FixedArray::cbegin()
//
// Returns a const iterator to the beginning of the fixed array.
- const_iterator cbegin() const { return begin(); }
+ const_iterator cbegin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
// FixedArray::end()
//
// Returns an iterator to the end of the fixed array.
- iterator end() { return data() + size(); }
+ iterator end() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return data() + size(); }
// Overload of FixedArray::end() to return a const iterator to the end of the
// fixed array.
- const_iterator end() const { return data() + size(); }
+ const_iterator end() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data() + size();
+ }
// FixedArray::cend()
//
// Returns a const iterator to the end of the fixed array.
- const_iterator cend() const { return end(); }
+ const_iterator cend() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
// FixedArray::rbegin()
//
// Returns a reverse iterator from the end of the fixed array.
- reverse_iterator rbegin() { return reverse_iterator(end()); }
+ reverse_iterator rbegin() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(end());
+ }
// Overload of FixedArray::rbegin() to return a const reverse iterator from
// the end of the fixed array.
- const_reverse_iterator rbegin() const {
+ const_reverse_iterator rbegin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(end());
}
// FixedArray::crbegin()
//
// Returns a const reverse iterator from the end of the fixed array.
- const_reverse_iterator crbegin() const { return rbegin(); }
+ const_reverse_iterator crbegin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rbegin();
+ }
// FixedArray::rend()
//
// Returns a reverse iterator from the beginning of the fixed array.
- reverse_iterator rend() { return reverse_iterator(begin()); }
+ reverse_iterator rend() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(begin());
+ }
// Overload of FixedArray::rend() for returning a const reverse iterator
// from the beginning of the fixed array.
- const_reverse_iterator rend() const {
+ const_reverse_iterator rend() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(begin());
}
// FixedArray::crend()
//
// Returns a reverse iterator from the beginning of the fixed array.
- const_reverse_iterator crend() const { return rend(); }
+ const_reverse_iterator crend() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rend();
+ }
// FixedArray::fill()
//
@@ -342,7 +364,7 @@ class FixedArray {
// Relational operators. Equality operators are elementwise using
// `operator==`, while order operators order FixedArrays lexicographically.
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
- return y_absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
@@ -464,6 +486,9 @@ class FixedArray {
StorageElement* begin() const { return data_; }
StorageElement* end() const { return begin() + size(); }
allocator_type& alloc() { return size_alloc_.template get<1>(); }
+ const allocator_type& alloc() const {
+ return size_alloc_.template get<1>();
+ }
private:
static bool UsingInlinedStorage(size_type n) {
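The Y_ABSL_ATTRIBUTE_LIFETIME_BOUND annotations added throughout FixedArray let lifetime-aware compilers flag pointers and iterators that outlive the array. A toy illustration of the underlying clang attribute ([[clang::lifetimebound]]; Buffer and MakeBuffer are invented for the example):

struct Buffer {
  int storage[4] = {};
  // Annotating the implicit object parameter: the returned pointer must not
  // outlive *this, which is what the data()/begin()/front() changes above do.
  const int* data() const [[clang::lifetimebound]] { return storage; }
};

Buffer MakeBuffer() { return Buffer{}; }

void Use() {
  const int* p = MakeBuffer().data();  // clang warns: pointer to a temporary
  (void)p;
}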
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
index e3d24a077e..c271317a98 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_map.h
@@ -235,7 +235,11 @@ class flat_hash_map : public y_absl::container_internal::raw_hash_map<
// iterator erase(const_iterator first, const_iterator last):
//
  // Erases the elements in the half-open interval [`first`, `last`), returning an
- // iterator pointing to `last`.
+ // iterator pointing to `last`. The special case of calling
+ // `erase(begin(), end())` resets the reserved growth such that if
+ // `reserve(N)` has previously been called and there has been no intervening
+ // call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+ // to assume that inserting N elements will not cause a rehash.
//
// size_type erase(const key_type& key):
//
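The reserved-growth guarantee documented above can be relied on to recycle a map's capacity. A small sketch (sizes arbitrary), using the header this hunk modifies:

#include "y_absl/container/flat_hash_map.h"

void ReuseCapacity() {
  y_absl::flat_hash_map<int, int> m;
  m.reserve(1000);
  for (int i = 0; i < 1000; ++i) m[i] = i;
  // Erasing the full range (with no intervening clear()) resets reserved
  // growth, so re-inserting up to 1000 elements should not cause a rehash.
  m.erase(m.begin(), m.end());
  for (int i = 0; i < 1000; ++i) m[i] = -i;
}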
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h
index 3680fb24c7..5599a0019a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/flat_hash_set.h
@@ -227,7 +227,11 @@ class flat_hash_set
// iterator erase(const_iterator first, const_iterator last):
//
  // Erases the elements in the half-open interval [`first`, `last`), returning an
- // iterator pointing to `last`.
+ // iterator pointing to `last`. The special case of calling
+ // `erase(begin(), end())` resets the reserved growth such that if
+ // `reserve(N)` has previously been called and there has been no intervening
+ // call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+ // to assume that inserting N elements will not cause a rehash.
//
// size_type erase(const key_type& key):
//
@@ -343,7 +347,7 @@ class flat_hash_set
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash set's hashing and key equivalence
- // functions be Swappable, and are exchaged using unqualified calls to
+ // functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
index aaec71987e..f5df597aca 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/inlined_vector.h
@@ -77,8 +77,6 @@ class InlinedVector {
template <typename TheA>
using MoveIterator = inlined_vector_internal::MoveIterator<TheA>;
template <typename TheA>
- using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<TheA>;
- template <typename TheA>
using IsMoveAssignOk = inlined_vector_internal::IsMoveAssignOk<TheA>;
template <typename TheA, typename Iterator>
@@ -182,14 +180,23 @@ class InlinedVector {
// provided `allocator`.
InlinedVector(const InlinedVector& other, const allocator_type& allocator)
: storage_(allocator) {
+ // Fast path: if the other vector is empty, there's nothing for us to do.
if (other.empty()) {
- // Empty; nothing to do.
- } else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
- // Memcpy-able and do not need allocation.
+ return;
+ }
+
+  // Fast path: if the value type is trivially copy constructible, the
+  // allocator doesn't do anything fancy, and there is nothing on the heap,
+  // then it is legal for us to simply memcpy the other vector's inlined
+  // bytes to form our copy of its elements.
+ if (y_absl::is_trivially_copy_constructible<value_type>::value &&
+ std::is_same<A, std::allocator<value_type>>::value &&
+ !other.storage_.GetIsAllocated()) {
storage_.MemcpyFrom(other.storage_);
- } else {
- storage_.InitFrom(other.storage_);
+ return;
}
+
+ storage_.InitFrom(other.storage_);
}
// Creates an inlined vector by moving in the contents of `other` without
@@ -210,26 +217,38 @@ class InlinedVector {
y_absl::allocator_is_nothrow<allocator_type>::value ||
std::is_nothrow_move_constructible<value_type>::value)
: storage_(other.storage_.GetAllocator()) {
- if (IsMemcpyOk<A>::value) {
+ // Fast path: if the value type can be trivially relocated (i.e. moved from
+ // and destroyed), and we know the allocator doesn't do anything fancy, then
+ // it's safe for us to simply adopt the contents of the storage for `other`
+ // and remove its own reference to them. It's as if we had individually
+ // move-constructed each value and then destroyed the original.
+ if (y_absl::is_trivially_relocatable<value_type>::value &&
+ std::is_same<A, std::allocator<value_type>>::value) {
storage_.MemcpyFrom(other.storage_);
-
other.storage_.SetInlinedSize(0);
- } else if (other.storage_.GetIsAllocated()) {
+ return;
+ }
+
+ // Fast path: if the other vector is on the heap, we can simply take over
+ // its allocation.
+ if (other.storage_.GetIsAllocated()) {
storage_.SetAllocation({other.storage_.GetAllocatedData(),
other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
- } else {
- IteratorValueAdapter<A, MoveIterator<A>> other_values(
- MoveIterator<A>(other.storage_.GetInlinedData()));
+ return;
+ }
- inlined_vector_internal::ConstructElements<A>(
- storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
- other.storage_.GetSize());
+ // Otherwise we must move each element individually.
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
- storage_.SetInlinedSize(other.storage_.GetSize());
- }
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+ other.storage_.GetSize());
+
+ storage_.SetInlinedSize(other.storage_.GetSize());
}
// Creates an inlined vector by moving in the contents of `other` with a copy
@@ -244,22 +263,34 @@ class InlinedVector {
const allocator_type&
allocator) noexcept(y_absl::allocator_is_nothrow<allocator_type>::value)
: storage_(allocator) {
- if (IsMemcpyOk<A>::value) {
+ // Fast path: if the value type can be trivially relocated (i.e. moved from
+ // and destroyed), and we know the allocator doesn't do anything fancy, then
+ // it's safe for us to simply adopt the contents of the storage for `other`
+ // and remove its own reference to them. It's as if we had individually
+ // move-constructed each value and then destroyed the original.
+ if (y_absl::is_trivially_relocatable<value_type>::value &&
+ std::is_same<A, std::allocator<value_type>>::value) {
storage_.MemcpyFrom(other.storage_);
-
other.storage_.SetInlinedSize(0);
- } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
- other.storage_.GetIsAllocated()) {
+ return;
+ }
+
+  // Fast path: if the other vector is on the heap and shares the same
+ // allocator, we can simply take over its allocation.
+ if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
+ other.storage_.GetIsAllocated()) {
storage_.SetAllocation({other.storage_.GetAllocatedData(),
other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
- } else {
- storage_.Initialize(IteratorValueAdapter<A, MoveIterator<A>>(
- MoveIterator<A>(other.data())),
- other.size());
+ return;
}
+
+ // Otherwise we must move each element individually.
+ storage_.Initialize(
+ IteratorValueAdapter<A, MoveIterator<A>>(MoveIterator<A>(other.data())),
+ other.size());
}
~InlinedVector() {}
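Both move-constructor fast paths above hinge on y_absl::is_trivially_relocatable: for such types, move-constructing into new storage and destroying the source is equivalent to a raw byte copy, so a whole inlined buffer can be adopted with one memcpy. A minimal illustration of the idea (Point is an invented trivially copyable type, the simplest case that qualifies):

#include <cstddef>
#include <cstring>
#include <type_traits>

struct Point { int x, y; };

// "Relocates" n Points: conceptually move-construct each element at dst and
// destroy the source, but for trivial types both steps fuse into one memcpy.
void Relocate(Point* dst, const Point* src, size_t n) {
  static_assert(std::is_trivially_copyable<Point>::value,
                "byte-wise relocation is only valid for trivial types");
  std::memcpy(dst, src, n * sizeof(Point));
}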
@@ -310,7 +341,7 @@ class InlinedVector {
// can be used to access and modify the contained elements.
//
// NOTE: only elements within [`data()`, `data() + size()`) are valid.
- pointer data() noexcept {
+ pointer data() noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
: storage_.GetInlinedData();
}
@@ -320,7 +351,7 @@ class InlinedVector {
// modify the contained elements.
//
// NOTE: only elements within [`data()`, `data() + size()`) are valid.
- const_pointer data() const noexcept {
+ const_pointer data() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
: storage_.GetInlinedData();
}
@@ -328,14 +359,14 @@ class InlinedVector {
// `InlinedVector::operator[](...)`
//
// Returns a `reference` to the `i`th element of the inlined vector.
- reference operator[](size_type i) {
+ reference operator[](size_type i) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
// Overload of `InlinedVector::operator[](...)` that returns a
// `const_reference` to the `i`th element of the inlined vector.
- const_reference operator[](size_type i) const {
+ const_reference operator[](size_type i) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -346,7 +377,7 @@ class InlinedVector {
//
// NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
// in both debug and non-debug builds, `std::out_of_range` will be thrown.
- reference at(size_type i) {
+ reference at(size_type i) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (Y_ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type)` failed bounds check");
@@ -359,7 +390,7 @@ class InlinedVector {
//
// NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
// in both debug and non-debug builds, `std::out_of_range` will be thrown.
- const_reference at(size_type i) const {
+ const_reference at(size_type i) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (Y_ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type) const` failed bounds check");
@@ -370,14 +401,14 @@ class InlinedVector {
// `InlinedVector::front()`
//
// Returns a `reference` to the first element of the inlined vector.
- reference front() {
+ reference front() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
// Overload of `InlinedVector::front()` that returns a `const_reference` to
// the first element of the inlined vector.
- const_reference front() const {
+ const_reference front() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
@@ -385,14 +416,14 @@ class InlinedVector {
// `InlinedVector::back()`
//
// Returns a `reference` to the last element of the inlined vector.
- reference back() {
+ reference back() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
// Overload of `InlinedVector::back()` that returns a `const_reference` to the
// last element of the inlined vector.
- const_reference back() const {
+ const_reference back() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
@@ -400,63 +431,82 @@ class InlinedVector {
// `InlinedVector::begin()`
//
// Returns an `iterator` to the beginning of the inlined vector.
- iterator begin() noexcept { return data(); }
+ iterator begin() noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
// Overload of `InlinedVector::begin()` that returns a `const_iterator` to
// the beginning of the inlined vector.
- const_iterator begin() const noexcept { return data(); }
+ const_iterator begin() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data();
+ }
// `InlinedVector::end()`
//
// Returns an `iterator` to the end of the inlined vector.
- iterator end() noexcept { return data() + size(); }
+ iterator end() noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data() + size();
+ }
// Overload of `InlinedVector::end()` that returns a `const_iterator` to the
// end of the inlined vector.
- const_iterator end() const noexcept { return data() + size(); }
+ const_iterator end() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data() + size();
+ }
// `InlinedVector::cbegin()`
//
// Returns a `const_iterator` to the beginning of the inlined vector.
- const_iterator cbegin() const noexcept { return begin(); }
+ const_iterator cbegin() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
// `InlinedVector::cend()`
//
// Returns a `const_iterator` to the end of the inlined vector.
- const_iterator cend() const noexcept { return end(); }
+ const_iterator cend() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return end();
+ }
// `InlinedVector::rbegin()`
//
// Returns a `reverse_iterator` from the end of the inlined vector.
- reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+ reverse_iterator rbegin() noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(end());
+ }
// Overload of `InlinedVector::rbegin()` that returns a
// `const_reverse_iterator` from the end of the inlined vector.
- const_reverse_iterator rbegin() const noexcept {
+ const_reverse_iterator rbegin() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(end());
}
// `InlinedVector::rend()`
//
// Returns a `reverse_iterator` from the beginning of the inlined vector.
- reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+ reverse_iterator rend() noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(begin());
+ }
// Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator`
// from the beginning of the inlined vector.
- const_reverse_iterator rend() const noexcept {
+ const_reverse_iterator rend() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(begin());
}
// `InlinedVector::crbegin()`
//
// Returns a `const_reverse_iterator` from the end of the inlined vector.
- const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+ const_reverse_iterator crbegin() const noexcept
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rbegin();
+ }
// `InlinedVector::crend()`
//
// Returns a `const_reverse_iterator` from the beginning of the inlined
// vector.
- const_reverse_iterator crend() const noexcept { return rend(); }
+ const_reverse_iterator crend() const noexcept Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rend();
+ }
// `InlinedVector::get_allocator()`
//
@@ -566,20 +616,23 @@ class InlinedVector {
//
// Inserts a copy of `v` at `pos`, returning an `iterator` to the newly
// inserted element.
- iterator insert(const_iterator pos, const_reference v) {
+ iterator insert(const_iterator pos,
+ const_reference v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(pos, v);
}
// Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
// move semantics, returning an `iterator` to the newly inserted element.
- iterator insert(const_iterator pos, value_type&& v) {
+ iterator insert(const_iterator pos,
+ value_type&& v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(pos, std::move(v));
}
// Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies
// of `v` starting at `pos`, returning an `iterator` pointing to the first of
// the newly inserted elements.
- iterator insert(const_iterator pos, size_type n, const_reference v) {
+ iterator insert(const_iterator pos, size_type n,
+ const_reference v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(pos >= begin());
Y_ABSL_HARDENING_ASSERT(pos <= end());
@@ -607,7 +660,8 @@ class InlinedVector {
// Overload of `InlinedVector::insert(...)` that inserts copies of the
// elements of `list` starting at `pos`, returning an `iterator` pointing to
// the first of the newly inserted elements.
- iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+ iterator insert(const_iterator pos, std::initializer_list<value_type> list)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(pos, list.begin(), list.end());
}
@@ -619,7 +673,7 @@ class InlinedVector {
template <typename ForwardIterator,
EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
iterator insert(const_iterator pos, ForwardIterator first,
- ForwardIterator last) {
+ ForwardIterator last) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(pos >= begin());
Y_ABSL_HARDENING_ASSERT(pos <= end());
@@ -639,7 +693,8 @@ class InlinedVector {
// NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
DisableIfAtLeastForwardIterator<InputIterator> = 0>
- iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+ iterator insert(const_iterator pos, InputIterator first,
+ InputIterator last) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(pos >= begin());
Y_ABSL_HARDENING_ASSERT(pos <= end());
@@ -656,7 +711,8 @@ class InlinedVector {
// Constructs and inserts an element using `args...` in the inlined vector at
// `pos`, returning an `iterator` pointing to the newly emplaced element.
template <typename... Args>
- iterator emplace(const_iterator pos, Args&&... args) {
+ iterator emplace(const_iterator pos,
+ Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(pos >= begin());
Y_ABSL_HARDENING_ASSERT(pos <= end());
@@ -684,7 +740,7 @@ class InlinedVector {
// Constructs and inserts an element using `args...` in the inlined vector at
// `end()`, returning a `reference` to the newly emplaced element.
template <typename... Args>
- reference emplace_back(Args&&... args) {
+ reference emplace_back(Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return storage_.EmplaceBack(std::forward<Args>(args)...);
}
@@ -714,8 +770,8 @@ class InlinedVector {
// Erases the element at `pos`, returning an `iterator` pointing to where the
// erased element was located.
//
- // NOTE: may return `end()`, which is not dereferencable.
- iterator erase(const_iterator pos) {
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator pos) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(pos >= begin());
Y_ABSL_HARDENING_ASSERT(pos < end());
@@ -726,8 +782,9 @@ class InlinedVector {
// range [`from`, `to`), returning an `iterator` pointing to where the first
// erased element was located.
//
- // NOTE: may return `end()`, which is not dereferencable.
- iterator erase(const_iterator from, const_iterator to) {
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator from,
+ const_iterator to) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(from >= begin());
Y_ABSL_HARDENING_ASSERT(from <= to);
Y_ABSL_HARDENING_ASSERT(to <= end());
@@ -784,39 +841,70 @@ class InlinedVector {
friend H AbslHashValue(H h, const y_absl::InlinedVector<TheT, TheN, TheA>& a);
void MoveAssignment(MemcpyPolicy, InlinedVector&& other) {
+ // Assumption check: we shouldn't be told to use memcpy to implement move
+ // assignment unless we have trivially destructible elements and an
+ // allocator that does nothing fancy.
+ static_assert(y_absl::is_trivially_destructible<value_type>::value, "");
+ static_assert(std::is_same<A, std::allocator<value_type>>::value, "");
+
+ // Throw away our existing heap allocation, if any. There is no need to
+ // destroy the existing elements one by one because we know they are
+ // trivially destructible.
+ storage_.DeallocateIfAllocated();
+
+ // Adopt the other vector's inline elements or heap allocation.
+ storage_.MemcpyFrom(other.storage_);
+ other.storage_.SetInlinedSize(0);
+ }
+
+ // Destroy our existing elements, if any, and adopt the heap-allocated
+ // elements of the other vector.
+ //
+ // REQUIRES: other.storage_.GetIsAllocated()
+ void DestroyExistingAndAdopt(InlinedVector&& other) {
+ Y_ABSL_HARDENING_ASSERT(other.storage_.GetIsAllocated());
+
inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
- storage_.MemcpyFrom(other.storage_);
+ storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
}
void MoveAssignment(ElementwiseAssignPolicy, InlinedVector&& other) {
+ // Fast path: if the other vector is on the heap then we don't worry about
+ // actually move-assigning each element. Instead we only throw away our own
+ // existing elements and adopt the heap allocation of the other vector.
if (other.storage_.GetIsAllocated()) {
- MoveAssignment(MemcpyPolicy{}, std::move(other));
- } else {
- storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
- MoveIterator<A>(other.storage_.GetInlinedData())),
- other.size());
+ DestroyExistingAndAdopt(std::move(other));
+ return;
}
+
+ storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.storage_.GetInlinedData())),
+ other.size());
}
void MoveAssignment(ElementwiseConstructPolicy, InlinedVector&& other) {
+ // Fast path: if the other vector is on the heap then we don't worry about
+ // actually move-assigning each element. Instead we only throw away our own
+ // existing elements and adopt the heap allocation of the other vector.
if (other.storage_.GetIsAllocated()) {
- MoveAssignment(MemcpyPolicy{}, std::move(other));
- } else {
- inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
- storage_.GetAllocator(), data(), size());
- storage_.DeallocateIfAllocated();
-
- IteratorValueAdapter<A, MoveIterator<A>> other_values(
- MoveIterator<A>(other.storage_.GetInlinedData()));
- inlined_vector_internal::ConstructElements<A>(
- storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
- other.storage_.GetSize());
- storage_.SetInlinedSize(other.storage_.GetSize());
+ DestroyExistingAndAdopt(std::move(other));
+ return;
}
+
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
+ storage_.DeallocateIfAllocated();
+
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+ other.storage_.GetSize());
+ storage_.SetInlinedSize(other.storage_.GetSize());
}
Storage storage_;
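
The hunk above splits move assignment into a memcpy fast path (trivially destructible elements paired with plain std::allocator) plus a shared DestroyExistingAndAdopt helper for heap-allocated sources. A minimal usage sketch of the resulting behavior, assuming the fork keeps the upstream InlinedVector API; the element types are illustrative:

    #include <utility>
    #include <util/generic/string.h>
    #include "y_absl/container/inlined_vector.h"

    void MoveAssignSketch() {
      // int is trivially destructible and uses std::allocator, so this
      // move assignment takes MemcpyPolicy: adopt the bytes, with no
      // element-wise destruction or moves.
      y_absl::InlinedVector<int, 4> a = {1, 2, 3}, b;
      b = std::move(a);

      // TString is move-assignable but not trivially destructible. An
      // inlined source is moved element by element; a heap-allocated
      // source is taken over via DestroyExistingAndAdopt.
      y_absl::InlinedVector<TString, 2> c = {"x", "y"}, d;
      d = std::move(c);
    }
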
@@ -843,7 +931,7 @@ bool operator==(const y_absl::InlinedVector<T, N, A>& a,
const y_absl::InlinedVector<T, N, A>& b) {
auto a_data = a.data();
auto b_data = b.data();
- return y_absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+ return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
}
// `operator!=(...)`
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common_policy_traits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common_policy_traits.h
index 927f6dac2d..e669efacd9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common_policy_traits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/common_policy_traits.h
@@ -87,7 +87,7 @@ struct common_policy_traits {
}
private:
- // To rank the overloads below for overload resoltion. Rank0 is preferred.
+ // To rank the overloads below for overload resolution. Rank0 is preferred.
struct Rank2 {};
struct Rank1 : Rank2 {};
struct Rank0 : Rank1 {};
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h
index b1bd5d4efa..6602a73088 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/compressed_tuple.h
@@ -64,19 +64,6 @@ struct Elem<CompressedTuple<B...>, I>
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;
-// Use the __is_final intrinsic if available. Where it's not available, classes
-// declared with the 'final' specifier cannot be used as CompressedTuple
-// elements.
-// TODO(sbenza): Replace this with std::is_final in C++14.
-template <typename T>
-constexpr bool IsFinal() {
-#if defined(__clang__) || defined(__GNUC__)
- return __is_final(T);
-#else
- return false;
-#endif
-}
-
// We can't use EBCO on other CompressedTuples because that would mean that we
// derive from multiple Storage<> instantiations with the same I parameter,
// and potentially from multiple identical Storage<> instantiations. So anytime
@@ -86,20 +73,15 @@ struct uses_inheritance {};
template <typename T>
constexpr bool ShouldUseBase() {
- return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ return std::is_class<T>::value && std::is_empty<T>::value &&
+ !std::is_final<T>::value &&
!std::is_base_of<uses_inheritance, T>::value;
}
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
-template <typename T, size_t I,
-#if defined(_MSC_VER)
- bool UseBase =
- ShouldUseBase<typename std::enable_if<true, T>::type>()>
-#else
- bool UseBase = ShouldUseBase<T>()>
-#endif
+template <typename T, size_t I, bool UseBase = ShouldUseBase<T>()>
struct Storage {
T value;
constexpr Storage() = default;
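
The simplification above leans on std::is_final. For context, a minimal sketch of why the final check matters for empty-base optimization; Box is a hypothetical stand-in for Storage, and the size claims hold on mainstream compilers:

    #include <type_traits>

    struct EmptyFn {};           // empty and non-final: EBO-eligible
    struct FinalFn final {};     // final: cannot be used as a base

    template <typename T,
              bool = std::is_empty<T>::value && !std::is_final<T>::value>
    struct Box : T {             // store T as a base: contributes zero bytes
      int value;
    };
    template <typename T>
    struct Box<T, false> {       // fallback: store T as a member
      T t;
      int value;
    };

    static_assert(sizeof(Box<EmptyFn>) == sizeof(int), "EBO applied");
    static_assert(sizeof(Box<FinalFn>) > sizeof(int), "no EBO for final");
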
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
index 9a44a3933e..19a64716be 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/container_memory.h
@@ -165,7 +165,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
std::forward<F>(f));
}
-// Given arguments of an std::pair's consructor, PairArgs() returns a pair of
+// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
// tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair.
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h
index 3de475353c..98e3a9449b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hash_function_defaults.h
@@ -56,6 +56,10 @@
#include "y_absl/strings/cord.h"
#include "y_absl/strings/string_view.h"
+#ifdef Y_ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -107,6 +111,48 @@ struct HashEq<y_absl::string_view> : StringHashEq {};
template <>
struct HashEq<y_absl::Cord> : StringHashEq {};
+#ifdef Y_ABSL_HAVE_STD_STRING_VIEW
+
+template <typename TChar>
+struct BasicStringHash {
+ using is_transparent = void;
+
+ size_t operator()(std::basic_string_view<TChar> v) const {
+ return y_absl::Hash<std::basic_string_view<TChar>>{}(v);
+ }
+};
+
+template <typename TChar>
+struct BasicStringEq {
+ using is_transparent = void;
+ bool operator()(std::basic_string_view<TChar> lhs,
+ std::basic_string_view<TChar> rhs) const {
+ return lhs == rhs;
+ }
+};
+
+// Supports heterogeneous lookup for w/u16/u32 string + string_view + char*.
+template <typename TChar>
+struct BasicStringHashEq {
+ using Hash = BasicStringHash<TChar>;
+ using Eq = BasicStringEq<TChar>;
+};
+
+template <>
+struct HashEq<std::wstring> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::wstring_view> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::u16string> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u16string_view> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u32string> : BasicStringHashEq<char32_t> {};
+template <>
+struct HashEq<std::u32string_view> : BasicStringHashEq<char32_t> {};
+
+#endif // Y_ABSL_HAVE_STD_STRING_VIEW
+
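
With these transparent functors in place, wide-character containers gain heterogeneous lookup where std::string_view is available. A short usage sketch, assuming the fork mirrors the upstream flat_hash_set API:

    #include <string>
    #include <string_view>
    #include "y_absl/container/flat_hash_set.h"

    bool ContainsWide(const y_absl::flat_hash_set<std::wstring>& set,
                      std::wstring_view key) {
      // The transparent BasicStringHashEq lets find() take the view (or a
      // const wchar_t*) directly, with no temporary std::wstring.
      return set.find(key) != set.end();
    }
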
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
struct HashEq<T*> {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
index d58a22a0b2..e6369d913d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/hashtablez_sampler.cc
@@ -23,11 +23,13 @@
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/memory/memory.h"
#include "y_absl/profiling/internal/exponential_biased.h"
#include "y_absl/profiling/internal/sample_recorder.h"
#include "y_absl/synchronization/mutex.h"
+#include "y_absl/time/clock.h"
#include "y_absl/utility/utility.h"
namespace y_absl {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
index b90c16528d..7820cf1f9c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/inlined_vector.h
@@ -77,13 +77,6 @@ using IsAtLeastForwardIterator = std::is_convertible<
std::forward_iterator_tag>;
template <typename A>
-using IsMemcpyOk =
- y_absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
- y_absl::is_trivially_copy_constructible<ValueType<A>>,
- y_absl::is_trivially_copy_assignable<ValueType<A>>,
- y_absl::is_trivially_destructible<ValueType<A>>>;
-
-template <typename A>
using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
using IsSwapOk = y_absl::type_traits_internal::IsSwappable<ValueType<A>>;
@@ -308,11 +301,36 @@ class Storage {
struct ElementwiseConstructPolicy {};
using MoveAssignmentPolicy = y_absl::conditional_t<
- IsMemcpyOk<A>::value, MemcpyPolicy,
+ // Fast path: if the value type can be trivially move assigned and
+ // destroyed, and we know the allocator doesn't do anything fancy, then
+ // it's safe for us to simply adopt the contents of the storage for
+ // `other` and remove its own reference to them. It's as if we had
+ // individually move-assigned each value and then destroyed the original.
+ y_absl::conjunction<y_absl::is_trivially_move_assignable<ValueType<A>>,
+ y_absl::is_trivially_destructible<ValueType<A>>,
+ std::is_same<A, std::allocator<ValueType<A>>>>::value,
+ MemcpyPolicy,
+ // Otherwise we use move assignment if possible. If not, we simulate
+ // move assignment using move construction.
+ //
+ // Note that this is in contrast to e.g. std::vector and std::optional,
+ // which are themselves not move-assignable when their contained type is
+ // not.
y_absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
ElementwiseConstructPolicy>>;
- using SwapPolicy = y_absl::conditional_t<
- IsMemcpyOk<A>::value, MemcpyPolicy,
+
+ // The policy to be used specifically when swapping inlined elements.
+ using SwapInlinedElementsPolicy = y_absl::conditional_t<
+ // Fast path: if the value type can be trivially move constructed/assigned
+ // and destroyed, and we know the allocator doesn't do anything fancy,
+ // then it's safe for us to simply swap the bytes in the inline storage.
+ // It's as if we had move-constructed a temporary vector, move-assigned
+ // one to the other, then move-assigned the first from the temporary.
+ y_absl::conjunction<y_absl::is_trivially_move_constructible<ValueType<A>>,
+ y_absl::is_trivially_move_assignable<ValueType<A>>,
+ y_absl::is_trivially_destructible<ValueType<A>>,
+ std::is_same<A, std::allocator<ValueType<A>>>>::value,
+ MemcpyPolicy,
y_absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
ElementwiseConstructPolicy>>;
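
A standalone sketch of the trait conjunction used for SwapInlinedElementsPolicy above, showing which types take the byte-swap fast path; the alias name is hypothetical:

    #include <memory>
    #include <type_traits>

    template <typename T, typename A>
    using UseMemcpySwap = std::integral_constant<
        bool, std::is_trivially_move_constructible<T>::value &&
                  std::is_trivially_move_assignable<T>::value &&
                  std::is_trivially_destructible<T>::value &&
                  std::is_same<A, std::allocator<T>>::value>;

    // int qualifies for the byte-swap fast path; shared_ptr has a
    // nontrivial destructor, so it falls back to element-wise swapping.
    static_assert(UseMemcpySwap<int, std::allocator<int>>::value, "");
    static_assert(!UseMemcpySwap<std::shared_ptr<int>,
                                 std::allocator<std::shared_ptr<int>>>::value,
                  "");
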
@@ -335,14 +353,21 @@ class Storage {
: metadata_(allocator, /* size and is_allocated */ 0u) {}
~Storage() {
+ // Fast path: if we are empty and not allocated, there's nothing to do.
if (GetSizeAndIsAllocated() == 0) {
- // Empty and not allocated; nothing to do.
- } else if (IsMemcpyOk<A>::value) {
- // No destructors need to be run; just deallocate if necessary.
+ return;
+ }
+
+ // Fast path: if no destructors need to be run and we know the allocator
+ // doesn't do anything fancy, then all we need to do is deallocate (and
+ // maybe not even that).
+ if (y_absl::is_trivially_destructible<ValueType<A>>::value &&
+ std::is_same<A, std::allocator<ValueType<A>>>::value) {
DeallocateIfAllocated();
- } else {
- DestroyContents();
+ return;
}
+
+ DestroyContents();
}
// ---------------------------------------------------------------------------
@@ -365,14 +390,18 @@ class Storage {
return data_.allocated.allocated_data;
}
- Pointer<A> GetInlinedData() {
- return reinterpret_cast<Pointer<A>>(
- std::addressof(data_.inlined.inlined_data[0]));
+ // Y_ABSL_ATTRIBUTE_NO_SANITIZE_CFI is used because the memory pointed to may be
+ // uninitialized, a common pattern in allocate()+construct() APIs.
+ // https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking
+ // NOTE: When this was written, LLVM documentation did not explicitly
+ // mention that casting `char*` and using `reinterpret_cast` qualifies
+ // as a bad cast.
+ Y_ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer<A> GetInlinedData() {
+ return reinterpret_cast<Pointer<A>>(data_.inlined.inlined_data);
}
- ConstPointer<A> GetInlinedData() const {
- return reinterpret_cast<ConstPointer<A>>(
- std::addressof(data_.inlined.inlined_data[0]));
+ Y_ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer<A> GetInlinedData() const {
+ return reinterpret_cast<ConstPointer<A>>(data_.inlined.inlined_data);
}
SizeType<A> GetAllocatedCapacity() const {
@@ -461,8 +490,32 @@ class Storage {
}
void MemcpyFrom(const Storage& other_storage) {
- Y_ABSL_HARDENING_ASSERT(IsMemcpyOk<A>::value ||
- other_storage.GetIsAllocated());
+ // Assumption check: it doesn't make sense to memcpy inlined elements unless
+ // we know the allocator doesn't do anything fancy, and one of the following
+ // holds:
+ //
+ // * The elements are trivially relocatable.
+ //
+ // * It's possible to trivially assign the elements and then destroy the
+ // source.
+ //
+ // * It's possible to trivially copy construct/assign the elements.
+ //
+ {
+ using V = ValueType<A>;
+ Y_ABSL_HARDENING_ASSERT(
+ other_storage.GetIsAllocated() ||
+ (std::is_same<A, std::allocator<V>>::value &&
+ (
+ // First case above
+ y_absl::is_trivially_relocatable<V>::value ||
+ // Second case above
+ (y_absl::is_trivially_move_assignable<V>::value &&
+ y_absl::is_trivially_destructible<V>::value) ||
+ // Third case above
+ (y_absl::is_trivially_copy_constructible<V>::value ||
+ y_absl::is_trivially_copy_assignable<V>::value))));
+ }
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
@@ -542,13 +595,19 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
dst = allocation.data;
src = other.GetAllocatedData();
}
- if (IsMemcpyOk<A>::value) {
+
+ // Fast path: if the value type is trivially copy constructible and we know
+ // the allocator doesn't do anything fancy, then we know it is legal for us to
+ // simply memcpy the other vector's elements.
+ if (y_absl::is_trivially_copy_constructible<ValueType<A>>::value &&
+ std::is_same<A, std::allocator<ValueType<A>>>::value) {
std::memcpy(reinterpret_cast<char*>(dst),
reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
} else {
auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
ConstructElements<A>(GetAllocator(), dst, values, n);
}
+
GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}
@@ -921,7 +980,7 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
} else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
- SwapInlinedElements(SwapPolicy{}, other_storage_ptr);
+ SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr);
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
@@ -995,7 +1054,7 @@ template <typename NotMemcpyPolicy>
void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
Storage* other) {
// Note: `destroy` needs to use pre-swap allocator while `construct` -
- // post-swap allocator. Allocators will be swaped later on outside of
+ // post-swap allocator. Allocators will be swapped later on outside of
// `SwapInlinedElements`.
Storage* small_ptr = this;
Storage* large_ptr = other;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h
index 524178ff6f..e1a6097b68 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_map.h
@@ -71,43 +71,51 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
// m.insert_or_assign(n, n);
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
}
template <class K = key_type, class V = mapped_type, K* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(std::forward<K>(k), v);
}
template <class K = key_type, class V = mapped_type, V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(k, std::forward<V>(v));
}
template <class K = key_type, class V = mapped_type>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(k, v);
}
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k,
+ V&& v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
}
template <class K = key_type, class V = mapped_type, K* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k,
+ const V& v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(std::forward<K>(k), v).first;
}
template <class K = key_type, class V = mapped_type, V* = nullptr>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k,
+ V&& v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(k, std::forward<V>(v)).first;
}
template <class K = key_type, class V = mapped_type>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k,
+ const V& v) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(k, v).first;
}
@@ -118,29 +126,33 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0,
K* = nullptr>
- std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+ std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
template <class K = key_type, class... Args,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0>
- std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+ std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(k, std::forward<Args>(args)...);
}
template <class K = key_type, class... Args, K* = nullptr>
- iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+ iterator try_emplace(const_iterator, key_arg<K>&& k,
+ Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
}
template <class K = key_type, class... Args>
- iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+ iterator try_emplace(const_iterator, const key_arg<K>& k,
+ Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(k, std::forward<Args>(args)...).first;
}
template <class K = key_type, class P = Policy>
- MappedReference<P> at(const key_arg<K>& key) {
+ MappedReference<P> at(const key_arg<K>& key) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = this->find(key);
if (it == this->end()) {
base_internal::ThrowStdOutOfRange(
@@ -150,7 +162,8 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
}
template <class K = key_type, class P = Policy>
- MappedConstReference<P> at(const key_arg<K>& key) const {
+ MappedConstReference<P> at(const key_arg<K>& key) const
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = this->find(key);
if (it == this->end()) {
base_internal::ThrowStdOutOfRange(
@@ -160,18 +173,21 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
}
template <class K = key_type, class P = Policy, K* = nullptr>
- MappedReference<P> operator[](key_arg<K>&& key) {
+ MappedReference<P> operator[](key_arg<K>&& key)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return Policy::value(&*try_emplace(std::forward<K>(key)).first);
}
template <class K = key_type, class P = Policy>
- MappedReference<P> operator[](const key_arg<K>& key) {
+ MappedReference<P> operator[](const key_arg<K>& key)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return Policy::value(&*try_emplace(key).first);
}
private:
template <class K, class V>
- std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+ std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
@@ -181,7 +197,8 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
}
template <class K = key_type, class... Args>
- std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+ std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::piecewise_construct,
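
The methods annotated above differ in when the mapped value is constructed: try_emplace builds it only if the key is absent, while insert_or_assign always overwrites. A short usage sketch with hypothetical values, assuming the fork keeps the upstream map API and its default hash supports TString:

    #include <util/generic/string.h>
    #include "y_absl/container/flat_hash_map.h"

    void Upsert(y_absl::flat_hash_map<TString, TString>& m) {
      // Constructs the mapped TString only if "k" is not already present;
      // otherwise the arguments are discarded.
      m.try_emplace("k", "initial");
      // Inserts or overwrites unconditionally.
      m.insert_or_assign("k", "latest");
    }
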
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
index e41730f431..dd84bf6cd9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.cc
@@ -15,34 +15,50 @@
#include "y_absl/container/internal/raw_hash_set.h"
#include <atomic>
+#include <cassert>
#include <cstddef>
#include <cstring>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
+#include "y_absl/base/dynamic_annotations.h"
+#include "y_absl/hash/hash.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace container_internal {
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
-// We have 17 bytes because there may be a generation counter. Any constant is
-// fine for the generation counter.
-alignas(16) Y_ABSL_CONST_INIT Y_ABSL_DLL const ctrl_t kEmptyGroup[17] = {
+// We have space for `growth_left` before a single block of control bytes. A
+// single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find(). In order to ensure
+// that the control bytes are aligned to 16, we have 16 bytes before the control
+// bytes even though growth_left only needs 8.
+constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+alignas(16) Y_ABSL_CONST_INIT Y_ABSL_DLL const ctrl_t kEmptyGroup[32] = {
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- static_cast<ctrl_t>(0)};
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif
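
The widened array keeps the control bytes 16-aligned while leaving room for a growth_left word in front of them, mirroring the real backing-array layout. A sketch of the indices, plus the invariants EmptyGroup() now relies on (the spot-check function is hypothetical):

    //   kEmptyGroup[0..15]   zero bytes standing in for growth_left
    //                        (8 bytes needed; 16 kept for alignment)
    //   kEmptyGroup[16]      ctrl_t::kSentinel  <- EmptyGroup() points here
    //   kEmptyGroup[17..31]  ctrl_t::kEmpty
    inline void EmptyGroupSketch() {
      const ctrl_t* ctrl = EmptyGroup();
      assert(ctrl == kEmptyGroup + 16);
      assert(*ctrl == ctrl_t::kSentinel);
    }
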
+namespace {
+
// Returns "random" seed.
inline size_t RandomSeed() {
#ifdef Y_ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
  // On Linux kernels >= 5.4 the MSAN runtime has a false positive when
+  // accessing thread local storage data from loaded libraries
+  // (https://github.com/google/sanitizers/issues/1265); for this reason, the
+  // counter needs to be annotated as initialized.
PLACEHOLDER_SELF_CORRECT
+ Y_ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t));
size_t value = ++counter;
#else // Y_ABSL_HAVE_THREAD_LOCAL
static std::atomic<size_t> counter(0);
@@ -51,6 +67,32 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
+} // namespace
+
+GenerationType* EmptyGeneration() {
+ if (SwisstableGenerationsEnabled()) {
+ constexpr size_t kNumEmptyGenerations = 1024;
+ static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
+ return const_cast<GenerationType*>(
+ &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
+ }
+ return nullptr;
+}
+
+bool CommonFieldsGenerationInfoEnabled::
+ should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ size_t capacity) const {
+ if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
+ if (reserved_growth_ > 0) return false;
+ // Note: we can't use the abseil-random library because abseil-random
+ // depends on swisstable. We want to return true with probability
+ // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
+ // we probe based on a random hash and see if the offset is less than
+ // RehashProbabilityConstant().
+ return probe(ctrl, capacity, y_absl::HashOf(RandomSeed())).offset() <
+ RehashProbabilityConstant();
+}
+
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
@@ -75,21 +117,22 @@ FindInfo find_first_non_full_outofline(const CommonFields& common,
return find_first_non_full(common, hash);
}
-// Return address of the ith slot in slots where each slot occupies slot_size.
+// Returns the address of the ith slot in slots where each slot occupies
+// slot_size.
static inline void* SlotAddress(void* slot_array, size_t slot,
size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
(slot * slot_size));
}
-// Return the address of the slot just after slot assuming each slot
-// has the specified size.
+// Returns the address of the slot just after slot assuming each slot has the
+// specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
}
-// Return the address of the slot just before slot assuming each slot
-// has the specified size.
+// Returns the address of the slot just before slot assuming each slot has the
+// specified size.
static inline void* PrevSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}
@@ -97,8 +140,8 @@ static inline void* PrevSlot(void* slot, size_t slot_size) {
void DropDeletesWithoutResize(CommonFields& common,
const PolicyFunctions& policy, void* tmp_space) {
void* set = &common;
- void* slot_array = common.slots_;
- const size_t capacity = common.capacity_;
+ void* slot_array = common.slot_array();
+ const size_t capacity = common.capacity();
assert(IsValidCapacity(capacity));
assert(!is_small(capacity));
// Algorithm:
@@ -117,7 +160,7 @@ void DropDeletesWithoutResize(CommonFields& common,
// swap current element with target element
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
- ctrl_t* ctrl = common.control_;
+ ctrl_t* ctrl = common.control();
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
auto hasher = policy.hash_slot;
auto transfer = policy.transfer;
@@ -177,11 +220,11 @@ void DropDeletesWithoutResize(CommonFields& common,
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
assert(IsFull(*it) && "erasing a dangling iterator");
- --c.size_;
- const auto index = static_cast<size_t>(it - c.control_);
- const size_t index_before = (index - Group::kWidth) & c.capacity_;
+ c.set_size(c.size() - 1);
+ const auto index = static_cast<size_t>(it - c.control());
+ const size_t index_before = (index - Group::kWidth) & c.capacity();
const auto empty_after = Group(it).MaskEmpty();
- const auto empty_before = Group(c.control_ + index_before).MaskEmpty();
+ const auto empty_before = Group(c.control() + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -193,26 +236,24 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
slot_size);
- c.growth_left() += (was_never_full ? 1 : 0);
+ c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0));
c.infoz().RecordErase();
}
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) {
- c.size_ = 0;
+ c.set_size(0);
if (reuse) {
ResetCtrl(c, policy.slot_size);
- c.infoz().RecordStorageChanged(0, c.capacity_);
+ c.infoz().RecordStorageChanged(0, c.capacity());
} else {
- void* set = &c;
- (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
- c.control_ = EmptyGroup();
+ (*policy.dealloc)(c, policy);
+ c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
- c.slots_ = nullptr;
- c.capacity_ = 0;
- c.growth_left() = 0;
+ c.set_slots(nullptr);
+ c.set_capacity(0);
c.infoz().RecordClearedReservation();
- assert(c.size_ == 0);
+ assert(c.size() == 0);
c.infoz().RecordStorageChanged(0, 0);
}
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
index 26fda8b83c..1ac9c5184d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/container/internal/raw_hash_set.h
@@ -62,6 +62,8 @@
// pseudo-struct:
//
// struct BackingArray {
+// // The number of elements we can insert before growing the capacity.
+// size_t growth_left;
// // Control bytes for the "real" slots.
// ctrl_t ctrl[capacity];
// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
@@ -115,7 +117,7 @@
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop and return an error. Each candidate slot `y` is compared with
-// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for.
//
@@ -174,21 +176,23 @@
#include <algorithm>
#include <cmath>
+#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
+#include <util/generic/string.h>
#include <tuple>
#include <type_traits>
#include <utility>
#include "y_absl/base/config.h"
#include "y_absl/base/internal/endian.h"
-#include "y_absl/base/internal/prefetch.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/optimization.h"
#include "y_absl/base/port.h"
+#include "y_absl/base/prefetch.h"
#include "y_absl/container/internal/common.h"
#include "y_absl/container/internal/compressed_tuple.h"
#include "y_absl/container/internal/container_memory.h"
@@ -235,6 +239,14 @@ namespace container_internal {
// We use uint8_t so we don't need to worry about padding.
using GenerationType = uint8_t;
+// A sentinel value for empty generations. Using 0 makes it easy to constexpr
+// initialize an array of this value.
+constexpr GenerationType SentinelEmptyGeneration() { return 0; }
+
+constexpr GenerationType NextGeneration(GenerationType generation) {
+ return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
+}
+
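
A small compile-time check of the wraparound: incrementing past 255 would land on the empty-table sentinel 0, so NextGeneration skips it:

    static_assert(NextGeneration(0) == 1, "");
    static_assert(NextGeneration(254) == 255, "");
    static_assert(NextGeneration(255) == 1, "");  // sentinel 0 is skipped
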
#ifdef Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS
constexpr bool SwisstableGenerationsEnabled() { return true; }
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
@@ -367,12 +379,12 @@ class NonIterableBitMask {
return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
- // Return the number of trailing zero *abstract* bits.
+ // Returns the number of trailing zero *abstract* bits.
uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- // Return the number of leading zero *abstract* bits.
+ // Returns the number of leading zero *abstract* bits.
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
@@ -475,19 +487,23 @@ static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
"ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-Y_ABSL_DLL extern const ctrl_t kEmptyGroup[17];
+// See definition comment for why this is size 32.
+Y_ABSL_DLL extern const ctrl_t kEmptyGroup[32];
// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
// Const must be cast away here; no uses of this function will actually write
// to it, because it is only used for empty tables.
- return const_cast<ctrl_t*>(kEmptyGroup);
+ return const_cast<ctrl_t*>(kEmptyGroup + 16);
}
-// Returns a pointer to the generation byte at the end of the empty group, if it
-// exists.
-inline GenerationType* EmptyGeneration() {
- return reinterpret_cast<GenerationType*>(EmptyGroup() + 16);
+// Returns a pointer to a generation to use for an empty hashtable.
+GenerationType* EmptyGeneration();
+
+// Returns whether `generation` is a generation for an empty hashtable that
+// could be returned by EmptyGeneration().
+inline bool IsEmptyGeneration(const GenerationType* generation) {
+ return *generation == SentinelEmptyGeneration();
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
@@ -674,9 +690,10 @@ struct GroupAArch64Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = mask & msbs;
- auto res = (~x + (x >> 7)) & ~lsbs;
+ constexpr uint64_t slsbs = 0x0202020202020202ULL;
+ constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
+ auto x = slsbs & (mask >> 6);
+ auto res = (x + midbs) | msbs;
little_endian::Store64(dst, res);
}
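
The new constants keep the same per-byte contract: bytes with the high bit set (empty/deleted/sentinel) map to kEmpty (0x80), and full bytes map to kDeleted (0xfe). A portable one-byte walkthrough of the SWAR step, assuming the standard swisstable control-byte encodings:

    #include <cstdint>

    constexpr uint8_t ConvertOneCtrlByte(uint8_t b) {
      // x is 0x02 when the high bit of b was set, else 0x00;
      // (x + 0x7e) | 0x80 then yields 0x80 or 0xfe respectively.
      const uint8_t x = static_cast<uint8_t>((b >> 6) & 0x02);
      return static_cast<uint8_t>((x + 0x7e) | 0x80);
    }
    static_assert(ConvertOneCtrlByte(0x23) == 0xfe, "full -> kDeleted");
    static_assert(ConvertOneCtrlByte(0x80) == 0x80, "kEmpty -> kEmpty");
    static_assert(ConvertOneCtrlByte(0xff) == 0x80, "kSentinel -> kEmpty");
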
@@ -749,6 +766,15 @@ using Group = GroupAArch64Impl;
using Group = GroupPortableImpl;
#endif
+// When there is an insertion with no reserved growth, we rehash with
+// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
+// constant divided by capacity ensures that inserting N elements is still O(N)
+// in the average case. Using the constant 16 means that we expect to rehash ~8
+// times more often than when generations are disabled. We are adding expected
+// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
+// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
+inline size_t RehashProbabilityConstant() { return 16; }
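
Expanding the arithmetic in the comment: between growth events a table of capacity c absorbs roughly (7/8 - 7/16) * c insertions, each rehashing with probability 16/c, so the capacity cancels. A hypothetical compile-time spot-check (shown for c = 1024, where every term is exact in double):

    constexpr double ExpectedExtraRehashesPerGrowth(double c) {
      return (16.0 / c) * ((7.0 / 8.0 - 7.0 / 16.0) * c);
    }
    static_assert(ExpectedExtraRehashesPerGrowth(1024.0) == 7.0, "");
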
+
class CommonFieldsGenerationInfoEnabled {
// A sentinel value for reserved_growth_ indicating that we just ran out of
// reserved growth on the last insertion. When reserve is called and then
@@ -760,8 +786,11 @@ class CommonFieldsGenerationInfoEnabled {
public:
CommonFieldsGenerationInfoEnabled() = default;
CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
- : reserved_growth_(that.reserved_growth_), generation_(that.generation_) {
+ : reserved_growth_(that.reserved_growth_),
+ reservation_size_(that.reservation_size_),
+ generation_(that.generation_) {
that.reserved_growth_ = 0;
+ that.reservation_size_ = 0;
that.generation_ = EmptyGeneration();
}
CommonFieldsGenerationInfoEnabled& operator=(
@@ -769,19 +798,17 @@ class CommonFieldsGenerationInfoEnabled {
// Whether we should rehash on insert in order to detect bugs of using invalid
// references. We rehash on the first insertion after reserved_growth_ reaches
- // 0 after a call to reserve.
- // TODO(b/254649633): we could potentially do a rehash with low probability
+ // 0 after a call to reserve. We also do a rehash with low probability
// whenever reserved_growth_ is zero.
- bool should_rehash_for_bug_detection_on_insert() const {
- return reserved_growth_ == kReservedGrowthJustRanOut;
- }
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ size_t capacity) const;
void maybe_increment_generation_on_insert() {
if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
if (reserved_growth_ > 0) {
if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
} else {
- ++*generation_;
+ *generation_ = NextGeneration(*generation_);
}
}
void reset_reserved_growth(size_t reservation, size_t size) {
@@ -789,6 +816,8 @@ class CommonFieldsGenerationInfoEnabled {
}
size_t reserved_growth() const { return reserved_growth_; }
void set_reserved_growth(size_t r) { reserved_growth_ = r; }
+ size_t reservation_size() const { return reservation_size_; }
+ void set_reservation_size(size_t r) { reservation_size_ = r; }
GenerationType generation() const { return *generation_; }
void set_generation(GenerationType g) { *generation_ = g; }
GenerationType* generation_ptr() const { return generation_; }
@@ -796,10 +825,14 @@ class CommonFieldsGenerationInfoEnabled {
private:
// The number of insertions remaining that are guaranteed to not rehash due to
- // a prior call to reserve. Note: we store reserved growth rather than
+ // a prior call to reserve. Note: we store reserved growth in addition to
// reservation size because calls to erase() decrease size_ but don't decrease
// reserved growth.
size_t reserved_growth_ = 0;
+ // The maximum argument to reserve() since the container was cleared. We need
+ // to keep track of this, in addition to reserved growth, because we reset
+ // reserved growth to this when erase(begin(), end()) is called.
+ size_t reservation_size_ = 0;
// Pointer to the generation counter, which is used to validate iterators and
// is stored in the backing array between the control bytes and the slots.
// Note that we can't store the generation inside the container itself and
@@ -820,11 +853,15 @@ class CommonFieldsGenerationInfoDisabled {
CommonFieldsGenerationInfoDisabled& operator=(
CommonFieldsGenerationInfoDisabled&&) = default;
- bool should_rehash_for_bug_detection_on_insert() const { return false; }
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+ return false;
+ }
void maybe_increment_generation_on_insert() {}
void reset_reserved_growth(size_t, size_t) {}
size_t reserved_growth() const { return 0; }
void set_reserved_growth(size_t) {}
+ size_t reservation_size() const { return 0; }
+ void set_reservation_size(size_t) {}
GenerationType generation() const { return 0; }
void set_generation(GenerationType) {}
GenerationType* generation_ptr() const { return nullptr; }
@@ -867,6 +904,44 @@ using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
#endif
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// Computes the offset from the start of the backing allocation of the control
+// bytes. growth_left is stored at the beginning of the backing array.
+inline size_t ControlOffset() { return sizeof(size_t); }
+
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) of the generation counter (if it exists).
+inline size_t GenerationOffset(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return ControlOffset() + num_control_bytes;
+}
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
+ (~slot_align + 1);
+}
+
+// Given the capacity of a table, computes the total size of the backing
+// array.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
+
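
A worked instance of the layout math, under the assumptions that Group::kWidth == 16 (so NumClonedBytes() == 15), sizeof(size_t) == 8, generations are enabled (NumGenerationBytes() == 1), and slots are 8 bytes with 8-byte alignment; the spot-check function is hypothetical:

    //   capacity          = 15            (2^4 - 1, a valid capacity)
    //   ControlOffset()   = 8             (the growth_left word)
    //   control bytes     = 15 + 1 + 15   (slots + sentinel + clones) = 31
    //   GenerationOffset  = 8 + 31        = 39
    //   SlotOffset        = (39 + 1 + 7) & ~7 = 40  (rounded up to 8)
    //   AllocSize         = 40 + 15 * 8   = 160 bytes total
    inline void AllocSizeSpotCheck() {
      assert(GenerationOffset(15) == 39);
      assert(SlotOffset(15, /*slot_align=*/8) == 40);
      assert(AllocSize(15, /*slot_size=*/8, /*slot_align=*/8) == 160);
    }
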
// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
@@ -884,72 +959,102 @@ class CommonFields : public CommonFieldsGenerationInfo {
std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
// Explicitly copying fields into "this" and then resetting "that"
      // fields generates less code than calling y_absl::exchange per field.
- control_(that.control_),
- slots_(that.slots_),
- size_(that.size_),
- capacity_(that.capacity_),
- compressed_tuple_(that.growth_left(), std::move(that.infoz())) {
- that.control_ = EmptyGroup();
- that.slots_ = nullptr;
- that.size_ = 0;
- that.capacity_ = 0;
- that.growth_left() = 0;
+ control_(that.control()),
+ slots_(that.slot_array()),
+ capacity_(that.capacity()),
+ compressed_tuple_(that.size(), std::move(that.infoz())) {
+ that.set_control(EmptyGroup());
+ that.set_slots(nullptr);
+ that.set_capacity(0);
+ that.set_size(0);
}
CommonFields& operator=(CommonFields&&) = default;
+ ctrl_t* control() const { return control_; }
+ void set_control(ctrl_t* c) { control_ = c; }
+ void* backing_array_start() const {
+ // growth_left is stored before control bytes.
+ assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
+ return control() - sizeof(size_t);
+ }
+
+ // Note: we can't use slots() because Qt defines "slots" as a macro.
+ void* slot_array() const { return slots_; }
+ void set_slots(void* s) { slots_ = s; }
+
+ // The number of filled slots.
+ size_t size() const { return compressed_tuple_.template get<0>(); }
+ void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
+
+ // The total number of available slots.
+ size_t capacity() const { return capacity_; }
+ void set_capacity(size_t c) {
+ assert(c == 0 || IsValidCapacity(c));
+ capacity_ = c;
+ }
+
// The number of slots we can still fill without needing to rehash.
- size_t& growth_left() { return compressed_tuple_.template get<0>(); }
+ // This is stored in the heap allocation before the control bytes.
+ size_t growth_left() const {
+ return *reinterpret_cast<size_t*>(backing_array_start());
+ }
+ void set_growth_left(size_t gl) {
+ *reinterpret_cast<size_t*>(backing_array_start()) = gl;
+ }
HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
const HashtablezInfoHandle& infoz() const {
return compressed_tuple_.template get<1>();
}
+ bool should_rehash_for_bug_detection_on_insert() const {
+ return CommonFieldsGenerationInfo::
+ should_rehash_for_bug_detection_on_insert(control(), capacity());
+ }
void reset_reserved_growth(size_t reservation) {
- CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size_);
+ CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
+ }
+
+ // The size of the backing array allocation.
+ size_t alloc_size(size_t slot_size, size_t slot_align) const {
+ return AllocSize(capacity(), slot_size, slot_align);
}
+ // Returns the number of control bytes set to kDeleted. For testing only.
+ size_t TombstonesCount() const {
+ return static_cast<size_t>(
+ std::count(control(), control() + capacity(), ctrl_t::kDeleted));
+ }
+
+ private:
// TODO(b/259599413): Investigate removing some of these fields:
// - control/slots can be derived from each other
- // - size can be moved into the slot array
+  // - we can use 6 bits for capacity since it's always a power of two minus 1
- // The control bytes (and, also, a pointer to the base of the backing array).
+ // The control bytes (and, also, a pointer near to the base of the backing
+ // array).
//
// This contains `capacity + 1 + NumClonedBytes()` entries, even
// when the table is empty (hence EmptyGroup).
+ //
+ // Note that growth_left is stored immediately before this pointer.
ctrl_t* control_ = EmptyGroup();
// The beginning of the slots, located at `SlotOffset()` bytes after
// `control`. May be null for empty tables.
void* slots_ = nullptr;
- // The number of filled slots.
- size_t size_ = 0;
-
- // The total number of available slots.
size_t capacity_ = 0;
- // Bundle together growth_left and HashtablezInfoHandle to ensure EBO for
+ // Bundle together size and HashtablezInfoHandle to ensure EBO for
// HashtablezInfoHandle when sampling is turned off.
y_absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
compressed_tuple_{0u, HashtablezInfoHandle{}};
};
-// Returns he number of "cloned control bytes".
-//
-// This is the number of control bytes that are present both at the beginning
-// of the control byte array and at the end, such that we can create a
-// `Group::kWidth`-width probe window starting from any control byte.
-constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
-
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
-// Returns whether `n` is a valid capacity (i.e., number of slots).
-//
-// A valid capacity is a non-zero integer `2^m - 1`.
-inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-
// Returns the next valid capacity after `n`.
inline size_t NextCapacity(size_t n) {
assert(IsValidCapacity(n) || n == 0);
@@ -1021,34 +1126,75 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
return 0;
}
-#define Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, generation, generation_ptr, \
- operation) \
- do { \
- Y_ABSL_HARDENING_ASSERT( \
- (ctrl != nullptr) && operation \
- " called on invalid iterator. The iterator might be an end() " \
- "iterator or may have been default constructed."); \
- if (SwisstableGenerationsEnabled() && generation != *generation_ptr) \
- Y_ABSL_INTERNAL_LOG(FATAL, operation \
- " called on invalidated iterator. The table could " \
- "have rehashed since this iterator was initialized."); \
- Y_ABSL_HARDENING_ASSERT( \
- (IsFull(*ctrl)) && operation \
- " called on invalid iterator. The element might have been erased or " \
- "the table might have rehashed."); \
- } while (0)
+constexpr bool SwisstableDebugEnabled() {
+#if defined(Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
+ Y_ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
+ const GenerationType* generation_ptr,
+ const char* operation) {
+ if (!SwisstableDebugEnabled()) return;
+ if (ctrl == nullptr) {
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ TString(operation) + " called on end() iterator.");
+ }
+ if (ctrl == EmptyGroup()) {
+ Y_ABSL_INTERNAL_LOG(FATAL, TString(operation) +
+ " called on default-constructed iterator.");
+ }
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ TString(operation) +
+ " called on invalid iterator. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!IsFull(*ctrl)) {
+ Y_ABSL_INTERNAL_LOG(
+ FATAL,
+ TString(operation) +
+ " called on invalid iterator. The element was likely erased.");
+ }
+ } else {
+ if (!IsFull(*ctrl)) {
+ Y_ABSL_INTERNAL_LOG(
+ FATAL,
+ TString(operation) +
+ " called on invalid iterator. The element might have been erased "
+ "or the table might have rehashed. Consider running with "
+ "--config=asan to diagnose rehashing issues.");
+ }
+ }
+}
// Note that for comparisons, null/end iterators are valid.
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
GenerationType generation,
const GenerationType* generation_ptr) {
- Y_ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid iterator comparison. The element might have "
- "been erased or the table might have rehashed.");
- if (SwisstableGenerationsEnabled() && generation != *generation_ptr) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. The table could have "
- "rehashed since this iterator was initialized.");
+ if (!SwisstableDebugEnabled()) return;
+ const bool ctrl_is_valid_for_comparison =
+ ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!ctrl_is_valid_for_comparison) {
+ Y_ABSL_INTERNAL_LOG(
+ FATAL, "Invalid iterator comparison. The element was likely erased.");
+ }
+ } else {
+ Y_ABSL_HARDENING_ASSERT(
+ ctrl_is_valid_for_comparison &&
+ "Invalid iterator comparison. The element might have been erased or "
+ "the table might have rehashed. Consider running with --config=asan to "
+ "diagnose rehashing issues.");
}
}
@@ -1074,16 +1220,54 @@ inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
// Asserts that two iterators come from the same container.
// Note: we take slots by reference so that it's not UB if they're uninitialized
// as long as we don't read them (when ctrl is null).
-// TODO(b/254649633): when generations are enabled, we can detect more cases of
-// different containers by comparing the pointers to the generations - this
-// can cover cases of end iterators that we would otherwise miss.
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
const void* const& slot_a,
- const void* const& slot_b) {
- Y_ABSL_HARDENING_ASSERT(
- AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
- "Invalid iterator comparison. The iterators may be from different "
- "containers or the container might have rehashed.");
+ const void* const& slot_b,
+ const GenerationType* generation_ptr_a,
+ const GenerationType* generation_ptr_b) {
+ if (!SwisstableDebugEnabled()) return;
+ const bool a_is_default = ctrl_a == EmptyGroup();
+ const bool b_is_default = ctrl_b == EmptyGroup();
+ if (a_is_default != b_is_default) {
+ Y_ABSL_INTERNAL_LOG(
+ FATAL,
+ "Invalid iterator comparison. Comparing default-constructed iterator "
+ "with non-default-constructed iterator.");
+ }
+ if (a_is_default && b_is_default) return;
+
+ if (SwisstableGenerationsEnabled()) {
+ if (generation_ptr_a == generation_ptr_b) return;
+ const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
+ const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
+ if (a_is_empty != b_is_empty) {
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator from "
+ "a non-empty hashtable with an iterator from an empty "
+ "hashtable.");
+ }
+ if (a_is_empty && b_is_empty) {
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterators from "
+ "different empty hashtables.");
+ }
+ const bool a_is_end = ctrl_a == nullptr;
+ const bool b_is_end = ctrl_b == nullptr;
+ if (a_is_end || b_is_end) {
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator with "
+ "an end() iterator from a different hashtable.");
+ }
+ Y_ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing non-end() "
+ "iterators from different hashtables.");
+ } else {
+ Y_ABSL_HARDENING_ASSERT(
+ AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
+ "Invalid iterator comparison. The iterators may be from different "
+ "containers or the container might have rehashed. Consider running "
+ "with --config=asan to diagnose rehashing issues.");
+ }
}
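+// Sketch of a comparison this catches (assuming generations are enabled):
+//
+//   y_absl::flat_hash_set<int> a = {1}, b = {2};
+//   a.begin() == b.begin();  // "Invalid iterator comparison. Comparing
+//                            //  non-end() iterators from different
+//                            //  hashtables."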
struct FindInfo {
@@ -1106,11 +1290,13 @@ struct FindInfo {
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
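+// e.g. with Group::kWidth == 16, the valid capacities 1, 3, and 7 are
+// "small": the whole table fits in a single probe group.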
// Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
- const ctrl_t* ctrl = common.control_;
- const size_t capacity = common.capacity_;
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+ size_t hash) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
+inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
+ return probe(common.control(), common.capacity(), hash);
+}
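+// Worked example (assuming Group::kWidth == 16 and capacity == 63): the
+// sequence starts at offset o = H1(hash) & 63 and advances by triangular
+// steps, probing the groups at offsets o, o+16, o+48, o+96 (mod 64) -- each
+// of the four 16-byte groups exactly once before the sequence would repeat.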
// Probes an array of control bits using a probe sequence derived from `hash`,
// and returns the offset corresponding to the first deleted or empty slot.
@@ -1122,7 +1308,7 @@ inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
template <typename = void>
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
auto seq = probe(common, hash);
- const ctrl_t* ctrl = common.control_;
+ const ctrl_t* ctrl = common.control();
while (true) {
Group g{ctrl + seq.offset()};
auto mask = g.MaskEmptyOrDeleted();
@@ -1132,14 +1318,14 @@ inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(common.capacity_) && ShouldInsertBackwards(hash, ctrl)) {
+ if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() <= common.capacity_ && "full table!");
+ assert(seq.index() <= common.capacity() && "full table!");
}
}
@@ -1153,18 +1339,18 @@ extern template FindInfo find_first_non_full(const CommonFields&, size_t);
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
inline void ResetGrowthLeft(CommonFields& common) {
- common.growth_left() = CapacityToGrowth(common.capacity_) - common.size_;
+ common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
}
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
// array as empty.
inline void ResetCtrl(CommonFields& common, size_t slot_size) {
- const size_t capacity = common.capacity_;
- ctrl_t* ctrl = common.control_;
+ const size_t capacity = common.capacity();
+ ctrl_t* ctrl = common.control();
std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
capacity + 1 + NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
- SanitizerPoisonMemoryRegion(common.slots_, slot_size * capacity);
+ SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
ResetGrowthLeft(common);
}
@@ -1174,17 +1360,17 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) {
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
size_t slot_size) {
- const size_t capacity = common.capacity_;
+ const size_t capacity = common.capacity();
assert(i < capacity);
- auto* slot_i = static_cast<const char*>(common.slots_) + i * slot_size;
+ auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
if (IsFull(h)) {
SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
} else {
SanitizerPoisonMemoryRegion(slot_i, slot_size);
}
- ctrl_t* ctrl = common.control_;
+ ctrl_t* ctrl = common.control();
ctrl[i] = h;
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
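+// Worked example of the mirroring arithmetic above (assuming
+// Group::kWidth == 16, so NumClonedBytes() == 15):
+//   capacity == 15, i == 3:  ((3 - 15) & 15) + (15 & 15) == 4 + 15 == 19,
+//     i.e. capacity + 1 + 3, slot 3's clone in the tail.
+//   capacity == 31, i == 20: ((20 - 15) & 31) + (15 & 31) == 5 + 15 == 20,
+//     i.e. slots past the cloned prefix harmlessly rewrite themselves.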
@@ -1195,56 +1381,41 @@ inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
}
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) of the generation counter (if it exists).
-inline size_t GenerationOffset(size_t capacity) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return num_control_bytes;
-}
-
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) at which the slots begin.
-inline size_t SlotOffset(size_t capacity, size_t slot_align) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return (num_control_bytes + NumGenerationBytes() + slot_align - 1) &
- (~slot_align + 1);
-}
-
-// Given the capacity of a table, computes the total size of the backing
-// array.
-inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
- return SlotOffset(capacity, slot_align) + capacity * slot_size;
+// growth_left (which is a size_t) is stored with the backing array.
+constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
+ return (std::max)(align_of_slot, alignof(size_t));
}
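+// e.g. BackingArrayAlignment(1) == alignof(size_t), keeping the leading
+// growth_left field aligned, while an overaligned slot type keeps its own
+// alignment: BackingArrayAlignment(32) == 32.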
template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
Y_ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
- assert(c.capacity_);
+ assert(c.capacity());
// Folks with custom allocators often make unwarranted assumptions about the
  // behavior of their classes vis-a-vis trivial destructibility and what
// calls they will or won't make. Avoid sampling for people with custom
// allocators to get us out of this mess. This is not a hard guarantee but
// a workaround while we plan the exact guarantee we want to provide.
const size_t sample_size =
- (std::is_same<Alloc, std::allocator<char>>::value && c.slots_ == nullptr)
+ (std::is_same<Alloc, std::allocator<char>>::value &&
+ c.slot_array() == nullptr)
? SizeOfSlot
: 0;
- const size_t cap = c.capacity_;
+ const size_t cap = c.capacity();
+ const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
+ // growth_left (which is a size_t) is stored with the backing array.
char* mem = static_cast<char*>(
- Allocate<AlignOfSlot>(&alloc, AllocSize(cap, SizeOfSlot, AlignOfSlot)));
+ Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
const GenerationType old_generation = c.generation();
c.set_generation_ptr(
reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
- c.set_generation(old_generation + 1);
- c.control_ = reinterpret_cast<ctrl_t*>(mem);
- c.slots_ = mem + SlotOffset(cap, AlignOfSlot);
+ c.set_generation(NextGeneration(old_generation));
+ c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
+ c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
ResetCtrl(c, SizeOfSlot);
if (sample_size) {
c.infoz() = Sample(sample_size);
}
- c.infoz().RecordStorageChanged(c.size_, cap);
+ c.infoz().RecordStorageChanged(c.size(), cap);
}
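+// Sketch of the resulting backing-array layout (exact offsets come from the
+// ControlOffset()/GenerationOffset()/SlotOffset() helpers defined earlier in
+// this header):
+//
+//   backing_array_start()
+//   v
+//   [growth_left: size_t][ctrl: capacity + 1 + NumClonedBytes() bytes]
+//   [generation, if enabled][padding to slot alignment][slots ...]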
// PolicyFunctions bundles together some information for a particular
@@ -1254,15 +1425,14 @@ Y_ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
struct PolicyFunctions {
size_t slot_size;
- // Return the hash of the pointed-to slot.
+ // Returns the hash of the pointed-to slot.
size_t (*hash_slot)(void* set, void* slot);
// Transfer the contents of src_slot to dst_slot.
void (*transfer)(void* set, void* dst_slot, void* src_slot);
- // Deallocate the specified backing store which is sized for n slots.
- void (*dealloc)(void* set, const PolicyFunctions& policy, ctrl_t* ctrl,
- void* slot_array, size_t n);
+ // Deallocate the backing store from common.
+ void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
};
// ClearBackingArray clears the backing array, either modifying it in place,
@@ -1279,16 +1449,16 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
// function body for raw_hash_set instantiations that have the
// same slot alignment.
template <size_t AlignOfSlot>
-Y_ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(void*,
- const PolicyFunctions& policy,
- ctrl_t* ctrl, void* slot_array,
- size_t n) {
+Y_ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
+ const PolicyFunctions& policy) {
// Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_array, policy.slot_size * n);
+ SanitizerUnpoisonMemoryRegion(common.slot_array(),
+ policy.slot_size * common.capacity());
std::allocator<char> alloc;
- Deallocate<AlignOfSlot>(&alloc, ctrl,
- AllocSize(n, policy.slot_size, AlignOfSlot));
+ Deallocate<BackingArrayAlignment(AlignOfSlot)>(
+ &alloc, common.backing_array_start(),
+ common.alloc_size(policy.slot_size, AlignOfSlot));
}
// For trivially relocatable types we use memcpy directly. This allows us to
@@ -1419,22 +1589,19 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator*()");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
pointer operator->() const {
- Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator->");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
return &operator*();
}
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- Y_ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator++");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -1448,9 +1615,10 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_);
AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
+ AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
+ a.generation_ptr(), b.generation_ptr());
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -1469,7 +1637,7 @@ class raw_hash_set {
}
// For end() iterators.
explicit iterator(const GenerationType* generation_ptr)
- : HashSetIteratorGenerationInfo(generation_ptr) {}
+ : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
  // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_` until
// they reach one.
@@ -1484,7 +1652,9 @@ class raw_hash_set {
if (Y_ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
- ctrl_t* ctrl_ = nullptr;
+ // We use EmptyGroup() for default-constructed iterators so that they can
+ // be distinguished from end iterators, which have nullptr ctrl_.
+ ctrl_t* ctrl_ = EmptyGroup();
// To avoid uninitialized member warnings, put slot_ in an anonymous union.
// The member is not initialized on singleton and end iterators.
union {
@@ -1537,9 +1707,9 @@ class raw_hash_set {
// Note: can't use `= default` due to non-default noexcept (causes
// problems for some compilers). NOLINTNEXTLINE
raw_hash_set() noexcept(
- std::is_nothrow_default_constructible<hasher>::value&&
- std::is_nothrow_default_constructible<key_equal>::value&&
- std::is_nothrow_default_constructible<allocator_type>::value) {}
+ std::is_nothrow_default_constructible<hasher>::value &&
+ std::is_nothrow_default_constructible<key_equal>::value &&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
Y_ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
size_t bucket_count, const hasher& hash = hasher(),
@@ -1547,7 +1717,7 @@ class raw_hash_set {
const allocator_type& alloc = allocator_type())
: settings_(CommonFields{}, hash, eq, alloc) {
if (bucket_count) {
- common().capacity_ = NormalizeCapacity(bucket_count);
+ common().set_capacity(NormalizeCapacity(bucket_count));
initialize_slots();
}
}
@@ -1649,7 +1819,9 @@ class raw_hash_set {
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
: raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
- reserve(that.size());
+ const size_t size = that.size();
+ if (size == 0) return;
+ reserve(size);
// Because the table is guaranteed to be empty, we can do something faster
// than a full `insert`.
for (const auto& v : that) {
@@ -1660,14 +1832,14 @@ class raw_hash_set {
common().maybe_increment_generation_on_insert();
infoz().RecordInsert(hash, target.probe_length);
}
- common().size_ = that.size();
- growth_left() -= that.size();
+ common().set_size(size);
+ set_growth_left(growth_left() - size);
}
Y_ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
- std::is_nothrow_copy_constructible<hasher>::value&&
- std::is_nothrow_copy_constructible<key_equal>::value&&
- std::is_nothrow_copy_constructible<allocator_type>::value)
+ std::is_nothrow_copy_constructible<hasher>::value &&
+ std::is_nothrow_copy_constructible<key_equal>::value &&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
: // Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
@@ -1696,9 +1868,9 @@ class raw_hash_set {
}
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
- y_absl::allocator_traits<allocator_type>::is_always_equal::value&&
- std::is_nothrow_move_assignable<hasher>::value&&
- std::is_nothrow_move_assignable<key_equal>::value) {
+ y_absl::allocator_traits<allocator_type>::is_always_equal::value &&
+ std::is_nothrow_move_assignable<hasher>::value &&
+ std::is_nothrow_move_assignable<key_equal>::value) {
// TODO(sbenza): We should only use the operations from the noexcept clause
// to make sure we actually adhere to that contract.
// NOLINTNEXTLINE: not returning *this for performance.
@@ -1714,30 +1886,36 @@ class raw_hash_set {
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), control(),
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), common().backing_array_start(),
AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
infoz().Unregister();
}
- iterator begin() {
+ iterator begin() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = iterator_at(0);
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return iterator(common().generation_ptr()); }
+ iterator end() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return iterator(common().generation_ptr());
+ }
- const_iterator begin() const {
+ const_iterator begin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return iterator(common().generation_ptr()); }
- const_iterator cbegin() const { return begin(); }
- const_iterator cend() const { return end(); }
+ const_iterator end() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return iterator(common().generation_ptr());
+ }
+ const_iterator cbegin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
+ const_iterator cend() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
bool empty() const { return !size(); }
- size_t size() const { return common().size_; }
- size_t capacity() const { return common().capacity_; }
+ size_t size() const { return common().size(); }
+ size_t capacity() const { return common().capacity(); }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
Y_ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -1753,10 +1931,10 @@ class raw_hash_set {
// Already guaranteed to be empty; so nothing to do.
} else {
destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(),
- /*reuse=*/cap < 128);
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
}
common().set_reserved_growth(0);
+ common().set_reservation_size(0);
}
inline void destroy_slots() {
@@ -1780,7 +1958,7 @@ class raw_hash_set {
template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
- std::pair<iterator, bool> insert(T&& value) {
+ std::pair<iterator, bool> insert(T&& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<T>(value));
}
@@ -1795,13 +1973,11 @@ class raw_hash_set {
// const char* p = "hello";
// s.insert(p);
//
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- std::pair<iterator, bool> insert(const T& value) {
+ std::pair<iterator, bool> insert(const T& value)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(value);
}
@@ -1810,7 +1986,8 @@ class raw_hash_set {
//
// flat_hash_map<TString, int> s;
// s.insert({"abc", 42});
- std::pair<iterator, bool> insert(init_type&& value) {
+ std::pair<iterator, bool> insert(init_type&& value)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::move(value));
}
@@ -1819,21 +1996,20 @@ class raw_hash_set {
template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
- iterator insert(const_iterator, T&& value) {
+ iterator insert(const_iterator, T&& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::forward<T>(value)).first;
}
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- iterator insert(const_iterator, const T& value) {
+ iterator insert(const_iterator,
+ const T& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(value).first;
}
- iterator insert(const_iterator, init_type&& value) {
+ iterator insert(const_iterator,
+ init_type&& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::move(value)).first;
}
@@ -1851,7 +2027,7 @@ class raw_hash_set {
insert(ilist.begin(), ilist.end());
}
- insert_return_type insert(node_type&& node) {
+ insert_return_type insert(node_type&& node) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return {end(), false, node_type()};
const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
auto res = PolicyTraits::apply(
@@ -1865,7 +2041,8 @@ class raw_hash_set {
}
}
- iterator insert(const_iterator, node_type&& node) {
+ iterator insert(const_iterator,
+ node_type&& node) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = insert(std::move(node));
node = std::move(res.node);
return res.position;
@@ -1882,7 +2059,8 @@ class raw_hash_set {
// m.emplace("abc", "xyz");
template <class... Args, typename std::enable_if<
IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ std::pair<iterator, bool> emplace(Args&&... args)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return PolicyTraits::apply(EmplaceDecomposable{*this},
std::forward<Args>(args)...);
}
@@ -1892,7 +2070,8 @@ class raw_hash_set {
// destroys.
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ std::pair<iterator, bool> emplace(Args&&... args)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
@@ -1902,14 +2081,16 @@ class raw_hash_set {
}
template <class... Args>
- iterator emplace_hint(const_iterator, Args&&... args) {
+ iterator emplace_hint(const_iterator,
+ Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<Args>(args)...).first;
}
// Extension API: support for lazy emplace.
//
// Looks up key in the table. If found, returns the iterator to the element.
- // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+ // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
+ // and returns an iterator to the new element.
//
// `f` must abide by several restrictions:
// - it MUST call `raw_hash_set::constructor` with arguments as if a
@@ -1952,7 +2133,8 @@ class raw_hash_set {
};
template <class K = key_type, class F>
- iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ iterator lazy_emplace(const key_arg<K>& key,
+ F&& f) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = find_or_prepare_insert(key);
if (res.second) {
slot_type* slot = slot_array() + res.first;
@@ -1997,13 +2179,25 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- Y_ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, it.generation(), it.generation_ptr(),
- "erase()");
+ AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
- iterator erase(const_iterator first, const_iterator last) {
+ iterator erase(const_iterator first,
+ const_iterator last) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ // We check for empty first because ClearBackingArray requires that
+ // capacity() > 0 as a precondition.
+ if (empty()) return end();
+ if (first == begin() && last == end()) {
+ // TODO(ezb): we access control bytes in destroy_slots so it could make
+ // sense to combine destroy_slots and ClearBackingArray to avoid cache
+ // misses when the table is large. Note that we also do this in clear().
+ destroy_slots();
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
+ common().set_reserved_growth(common().reservation_size());
+ return end();
+ }
while (first != last) {
erase(first++);
}
@@ -2032,9 +2226,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- Y_ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
- position.inner_.generation(),
- position.inner_.generation_ptr(), "extract()");
+ AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+ position.inner_.generation_ptr(), "extract()");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -2064,8 +2257,7 @@ class raw_hash_set {
void rehash(size_t n) {
if (n == 0 && capacity() == 0) return;
if (n == 0 && size() == 0) {
- ClearBackingArray(common(), GetPolicyFunctions(),
- /*reuse=*/false);
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
return;
}
@@ -2092,6 +2284,7 @@ class raw_hash_set {
infoz().RecordReservation(n);
}
common().reset_reserved_growth(n);
+ common().set_reservation_size(n);
}
// Extension API: support for heterogeneous keys.
@@ -2117,12 +2310,12 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const {
(void)key;
// Avoid probing if we won't be able to prefetch the addresses received.
-#ifdef Y_ABSL_INTERNAL_HAVE_PREFETCH
+#ifdef Y_ABSL_HAVE_PREFETCH
prefetch_heap_block();
auto seq = probe(common(), hash_ref()(key));
- base_internal::PrefetchT0(control() + seq.offset());
- base_internal::PrefetchT0(slot_array() + seq.offset());
-#endif // Y_ABSL_INTERNAL_HAVE_PREFETCH
+ PrefetchToLocalCache(control() + seq.offset());
+ PrefetchToLocalCache(slot_array() + seq.offset());
+#endif // Y_ABSL_HAVE_PREFETCH
}
// The API of find() has two extensions.
@@ -2133,7 +2326,8 @@ class raw_hash_set {
// 2. The type of the key argument doesn't have to be key_type. This is so
// called heterogeneous key support.
template <class K = key_type>
- iterator find(const key_arg<K>& key, size_t hash) {
+ iterator find(const key_arg<K>& key,
+ size_t hash) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto seq = probe(common(), hash);
slot_type* slot_ptr = slot_array();
const ctrl_t* ctrl = control();
@@ -2151,17 +2345,19 @@ class raw_hash_set {
}
}
template <class K = key_type>
- iterator find(const key_arg<K>& key) {
+ iterator find(const key_arg<K>& key) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
prefetch_heap_block();
return find(key, hash_ref()(key));
}
template <class K = key_type>
- const_iterator find(const key_arg<K>& key, size_t hash) const {
+ const_iterator find(const key_arg<K>& key,
+ size_t hash) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_cast<raw_hash_set*>(this)->find(key, hash);
}
template <class K = key_type>
- const_iterator find(const key_arg<K>& key) const {
+ const_iterator find(const key_arg<K>& key) const
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -2172,14 +2368,15 @@ class raw_hash_set {
}
template <class K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
}
template <class K = key_type>
std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K>& key) const {
+ const key_arg<K>& key) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
@@ -2313,8 +2510,8 @@ class raw_hash_set {
assert(IsValidCapacity(new_capacity));
auto* old_ctrl = control();
auto* old_slots = slot_array();
- const size_t old_capacity = common().capacity_;
- common().capacity_ = new_capacity;
+ const size_t old_capacity = common().capacity();
+ common().set_capacity(new_capacity);
initialize_slots();
auto* new_slots = slot_array();
@@ -2333,8 +2530,8 @@ class raw_hash_set {
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), old_ctrl,
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), old_ctrl - ControlOffset(),
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
infoz().RecordRehash(total_probe_length);
@@ -2357,8 +2554,8 @@ class raw_hash_set {
void rehash_and_grow_if_necessary() {
const size_t cap = capacity();
if (cap > Group::kWidth &&
- // Do these calcuations in 64-bit to avoid overflow.
- size() * uint64_t{32} <= cap* uint64_t{25}) {
+ // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= cap * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
//
// Rehash in place if the current size is <= 25/32 of capacity.
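+      // e.g. at capacity == 63 an erase-heavy workload is squashed in place
+      // whenever size() * 32 <= 63 * 25, i.e. size() <= 49, rather than
+      // doubling the table to capacity 127.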
@@ -2481,8 +2678,8 @@ class raw_hash_set {
rehash_and_grow_if_necessary();
target = find_first_non_full(common(), hash);
}
- ++common().size_;
- growth_left() -= IsEmpty(control()[target.offset]);
+ common().set_size(common().size() + 1);
+ set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
common().maybe_increment_generation_on_insert();
infoz().RecordInsert(hash, target.probe_length);
@@ -2507,10 +2704,10 @@ class raw_hash_set {
"constructed value does not match the lookup key");
}
- iterator iterator_at(size_t i) {
+ iterator iterator_at(size_t i) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
- const_iterator iterator_at(size_t i) const {
+ const_iterator iterator_at(size_t i) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
@@ -2527,19 +2724,24 @@ class raw_hash_set {
// side-effect.
//
// See `CapacityToGrowth()`.
- size_t& growth_left() { return common().growth_left(); }
-
- // Prefetch the heap-allocated memory region to resolve potential TLB misses.
- // This is intended to overlap with execution of calculating the hash for a
- // key.
- void prefetch_heap_block() const { base_internal::PrefetchT2(control()); }
+ size_t growth_left() const { return common().growth_left(); }
+ void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
+
+ // Prefetch the heap-allocated memory region to resolve potential TLB and
+ // cache misses. This is intended to overlap with execution of calculating the
+ // hash for a key.
+ void prefetch_heap_block() const {
+#if Y_ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
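+    // rw == 0 requests a read prefetch; locality == 1 asks for low temporal
+    // locality, roughly the retired base_internal::PrefetchT2() hint.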
+ __builtin_prefetch(control(), 0, 1);
+#endif
+ }
CommonFields& common() { return settings_.template get<0>(); }
const CommonFields& common() const { return settings_.template get<0>(); }
- ctrl_t* control() const { return common().control_; }
+ ctrl_t* control() const { return common().control(); }
slot_type* slot_array() const {
- return static_cast<slot_type*>(common().slots_);
+ return static_cast<slot_type*>(common().slot_array());
}
HashtablezInfoHandle& infoz() { return common().infoz(); }
@@ -2565,16 +2767,16 @@ class raw_hash_set {
static_cast<slot_type*>(src));
}
// Note: dealloc_fn will only be used if we have a non-standard allocator.
- static void dealloc_fn(void* set, const PolicyFunctions&, ctrl_t* ctrl,
- void* slot_mem, size_t n) {
- auto* h = static_cast<raw_hash_set*>(set);
+ static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
+ auto* set = reinterpret_cast<raw_hash_set*>(&common);
// Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_mem, sizeof(slot_type) * n);
+ SanitizerUnpoisonMemoryRegion(common.slot_array(),
+ sizeof(slot_type) * common.capacity());
- Deallocate<alignof(slot_type)>(
- &h->alloc_ref(), ctrl,
- AllocSize(n, sizeof(slot_type), alignof(slot_type)));
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &set->alloc_ref(), common.backing_array_start(),
+ common.alloc_size(sizeof(slot_type), alignof(slot_type)));
}
static const PolicyFunctions& GetPolicyFunctions() {
@@ -2680,6 +2882,5 @@ Y_ABSL_NAMESPACE_END
} // namespace y_absl
#undef Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS
-#undef Y_ABSL_INTERNAL_ASSERT_IS_FULL
#endif // Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/crc32c.h b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/crc32c.h
index fb25687e28..bcd5b96a3c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/crc32c.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/crc32c.h
@@ -29,6 +29,7 @@
#include <ostream>
#include "y_absl/crc/internal/crc32c_inline.h"
+#include "y_absl/strings/str_format.h"
#include "y_absl/strings/string_view.h"
namespace y_absl {
@@ -61,10 +62,16 @@ class crc32c_t final {
friend bool operator!=(crc32c_t lhs, crc32c_t rhs) { return !(lhs == rhs); }
+ template <typename Sink>
+ friend void AbslStringify(Sink& sink, crc32c_t crc) {
+ y_absl::Format(&sink, "%08x", static_cast<uint32_t>(crc));
+ }
+
private:
uint32_t crc_;
};
+
namespace crc_internal {
// Non-inline code path for `y_absl::ExtendCrc32c()`. Do not call directly.
// Call `y_absl::ExtendCrc32c()` (defined below) instead.
@@ -174,7 +181,7 @@ crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc,
//
// Streams the CRC32C value `crc` to the stream `os`.
inline std::ostream& operator<<(std::ostream& os, crc32c_t crc) {
- return os << static_cast<uint32_t>(crc);
+ return os << y_absl::StreamFormat("%08x", static_cast<uint32_t>(crc));
}
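+// Usage sketch (assuming y_absl::StrCat picks up AbslStringify, as in
+// upstream Abseil):
+//
+//   y_absl::crc32c_t crc{0x00ACF123};
+//   std::cout << crc;                  // prints "00acf123", now zero-padded
+//   TString s = y_absl::StrCat(crc);   // also "00acf123" via AbslStringify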
Y_ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/cpu_detect.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/cpu_detect.cc
index 8c8f8d5580..3a9e335643 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/cpu_detect.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/cpu_detect.cc
@@ -28,15 +28,12 @@
#include <intrin.h>
#endif
-namespace y_absl {
-Y_ABSL_NAMESPACE_BEGIN
-namespace crc_internal {
-
#if defined(__x86_64__) || defined(_M_X64)
-
-namespace {
-
-#if !defined(_WIN32) && !defined(_WIN64)
+#if Y_ABSL_HAVE_BUILTIN(__cpuid)
+// MSVC-equivalent __cpuid intrinsic declaration for clang-like compilers
+// for non-Windows build environments.
+extern void __cpuid(int[4], int);
+#elif !defined(_WIN32) && !defined(_WIN64)
// MSVC defines this function for us.
// https://learn.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex
static void __cpuid(int cpu_info[4], int info_type) {
@@ -46,6 +43,15 @@ static void __cpuid(int cpu_info[4], int info_type) {
: "a"(info_type), "c"(0));
}
#endif // !defined(_WIN32) && !defined(_WIN64)
+#endif // defined(__x86_64__) || defined(_M_X64)
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+#if defined(__x86_64__) || defined(_M_X64)
+
+namespace {
enum class Vendor {
kUnknown,
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.cc
index a5b04de0bb..b8410bbb4d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.cc
@@ -44,8 +44,8 @@
#include <cstdint>
#include "y_absl/base/internal/endian.h"
-#include "y_absl/base/internal/prefetch.h"
#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/prefetch.h"
#include "y_absl/crc/internal/crc_internal.h"
namespace y_absl {
@@ -176,9 +176,6 @@ CRCImpl* CRCImpl::NewInternal() {
return result;
}
-// The CRC of the empty string is always the CRC polynomial itself.
-void CRCImpl::Empty(uint32_t* crc) const { *crc = kCrc32cPoly; }
-
// The 32-bit implementation
void CRC32::InitTables() {
@@ -261,7 +258,7 @@ void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
const uint8_t* e = p + length;
uint32_t l = *crc;
- auto step_one_byte = [this, &p, &l] () {
+ auto step_one_byte = [this, &p, &l]() {
int c = (l & 0xff) ^ *p++;
l = this->table0_[c] ^ (l >> 8);
};
@@ -309,7 +306,7 @@ void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
// Process kStride interleaved swaths through the data in parallel.
while ((e - p) > kPrefetchHorizon) {
- base_internal::PrefetchNta(
+ PrefetchToLocalCacheNta(
reinterpret_cast<const void*>(p + kPrefetchHorizon));
// Process 64 bytes at a time
step_stride();
@@ -359,7 +356,7 @@ void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
void CRC32::ExtendByZeroesImpl(uint32_t* crc, size_t length,
const uint32_t zeroes_table[256],
- const uint32_t poly_table[256]) const {
+ const uint32_t poly_table[256]) {
if (length != 0) {
uint32_t l = *crc;
// For each ZEROES_BASE_LG bits in length
@@ -435,34 +432,6 @@ CRC* CRC::Crc32c() {
return singleton;
}
-// This Concat implementation works for arbitrary polynomials.
-void CRC::Concat(uint32_t* px, uint32_t y, size_t ylen) {
- // https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks
- // The CRC of a message M is the remainder of polynomial divison modulo G,
- // where the coefficient arithmetic is performed modulo 2 (so +/- are XOR):
- // R(x) = M(x) x**n (mod G)
- // (n is the degree of G)
- // In practice, we use an initial value A and a bitmask B to get
- // R = (A ^ B)x**|M| ^ Mx**n ^ B (mod G)
- // If M is the concatenation of two strings S and T, and Z is the string of
- // len(T) 0s, then the remainder CRC(ST) can be expressed as:
- // R = (A ^ B)x**|ST| ^ STx**n ^ B
- // = (A ^ B)x**|SZ| ^ SZx**n ^ B ^ Tx**n
- // = CRC(SZ) ^ Tx**n
- // CRC(Z) = (A ^ B)x**|T| ^ B
- // CRC(T) = (A ^ B)x**|T| ^ Tx**n ^ B
- // So R = CRC(SZ) ^ CRC(Z) ^ CRC(T)
- //
- // And further, since CRC(SZ) = Extend(CRC(S), Z),
- // CRC(SZ) ^ CRC(Z) = Extend(CRC(S) ^ CRC(''), Z).
- uint32_t z;
- uint32_t t;
- Empty(&z);
- t = *px ^ z;
- ExtendByZeroes(&t, ylen);
- *px = t ^ y;
-}
-
} // namespace crc_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.h b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.h
index 051015a5f3..62973c378f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc.h
@@ -40,9 +40,6 @@ class CRC {
public:
virtual ~CRC();
- // Place the CRC of the empty string in "*crc"
- virtual void Empty(uint32_t* crc) const = 0;
-
// If "*crc" is the CRC of bytestring A, place the CRC of
// the bytestring formed from the concatenation of A and the "length"
// bytes at "bytes" into "*crc".
@@ -53,22 +50,17 @@ class CRC {
// points to an array of "length" zero bytes.
virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0;
- // Inverse opration of ExtendByZeroes. If `crc` is the CRC value of a string
+ // Inverse operation of ExtendByZeroes. If `crc` is the CRC value of a string
// ending in `length` zero bytes, this returns a CRC value of that string
// with those zero bytes removed.
virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0;
- // If *px is the CRC (as defined by *crc) of some string X,
- // and y is the CRC of some string Y that is ylen bytes long, set
- // *px to the CRC of the concatenation of X followed by Y.
- virtual void Concat(uint32_t* px, uint32_t y, size_t ylen);
-
// Apply a non-linear transformation to "*crc" so that
// it is safe to CRC the result with the same polynomial without
// any reduction of error-detection ability in the outer CRC.
// Unscramble() performs the inverse transformation.
// It is strongly recommended that CRCs be scrambled before storage or
- // transmission, and unscrambled at the other end before futher manipulation.
+ // transmission, and unscrambled at the other end before further manipulation.
virtual void Scramble(uint32_t* crc) const = 0;
virtual void Unscramble(uint32_t* crc) const = 0;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc32_x86_arm_combined_simd.h b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc32_x86_arm_combined_simd.h
index 3e49100c8d..699d197111 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc32_x86_arm_combined_simd.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc32_x86_arm_combined_simd.h
@@ -33,7 +33,7 @@
#include <x86intrin.h>
#define Y_ABSL_CRC_INTERNAL_HAVE_X86_SIMD
-#elif defined(_MSC_VER) && defined(__AVX__)
+#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__)
// MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ.
#include <intrin.h>
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.cc
index 4e8e88bce7..695f2db14a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.cc
@@ -121,7 +121,7 @@ void CrcCordState::Poison() {
}
} else {
// Add a fake corrupt chunk.
- rep->prefix_crc.push_back(PrefixCrc(0, crc32c_t{1}));
+ rep->prefix_crc.emplace_back(0, crc32c_t{1});
}
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.h b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.h
index 610b0fbbc6..d2d185144f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_cord_state.h
@@ -71,9 +71,9 @@ class CrcCordState {
struct Rep {
// `removed_prefix` is the crc and length of any prefix that has been
// removed from the Cord (for example, by calling
- // `CrcCord::RemovePrefix()`). To get the checkum of any prefix of the cord,
- // this value must be subtracted from `prefix_crc`. See `Checksum()` for an
- // example.
+ // `CrcCord::RemovePrefix()`). To get the checksum of any prefix of the
+ // cord, this value must be subtracted from `prefix_crc`. See `Checksum()`
+ // for an example.
//
// CrcCordState is said to be "normalized" if removed_prefix.length == 0.
PrefixCrc removed_prefix;
@@ -109,7 +109,7 @@ class CrcCordState {
// Returns true if the chunked CRC32C cached is normalized.
bool IsNormalized() const { return rep().removed_prefix.length == 0; }
- // Normalizes the chunked CRC32C checksum cache by substracting any removed
+ // Normalizes the chunked CRC32C checksum cache by subtracting any removed
// prefix from the chunks.
void Normalize();
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_internal.h
index 591b5200ee..5272c0b97a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_internal.h
@@ -60,18 +60,16 @@ constexpr uint64_t kScrambleHi = (static_cast<uint64_t>(0x4f1bbcdcU) << 32) |
constexpr uint64_t kScrambleLo = (static_cast<uint64_t>(0xf9ce6030U) << 32) |
static_cast<uint64_t>(0x2e76e41bU);
-class CRCImpl : public CRC { // Implemention of the abstract class CRC
+class CRCImpl : public CRC { // Implementation of the abstract class CRC
public:
using Uint32By256 = uint32_t[256];
- CRCImpl() {}
+ CRCImpl() = default;
~CRCImpl() override = default;
// The internal version of CRC::New().
static CRCImpl* NewInternal();
- void Empty(uint32_t* crc) const override;
-
// Fill in a table for updating a CRC by one word of 'word_size' bytes
// [last_lo, last_hi] contains the answer if the last bit in the word
// is set.
@@ -96,8 +94,8 @@ class CRCImpl : public CRC { // Implemention of the abstract class CRC
// This is the 32-bit implementation. It handles all sizes from 8 to 32.
class CRC32 : public CRCImpl {
public:
- CRC32() {}
- ~CRC32() override {}
+ CRC32() = default;
+ ~CRC32() override = default;
void Extend(uint32_t* crc, const void* bytes, size_t length) const override;
void ExtendByZeroes(uint32_t* crc, size_t length) const override;
@@ -111,16 +109,16 @@ class CRC32 : public CRCImpl {
// Common implementation guts for ExtendByZeroes and UnextendByZeroes().
//
// zeroes_table is a table as returned by FillZeroesTable(), containing
- // polynomials representing CRCs of strings-of-zeros of various lenghts,
+ // polynomials representing CRCs of strings-of-zeros of various lengths,
// and which can be combined by polynomial multiplication. poly_table is
// a table of CRC byte extension values. These tables are determined by
// the generator polynomial.
//
// These will be set to reverse_zeroes_ and reverse_table0_ for Unextend, and
// CRC32::zeroes_ and CRC32::table0_ for Extend.
- void ExtendByZeroesImpl(uint32_t* crc, size_t length,
- const uint32_t zeroes_table[256],
- const uint32_t poly_table[256]) const;
+ static void ExtendByZeroesImpl(uint32_t* crc, size_t length,
+ const uint32_t zeroes_table[256],
+ const uint32_t poly_table[256]);
uint32_t table0_[256]; // table of byte extensions
uint32_t zeroes_[256]; // table of zero extensions
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_memcpy_x86_64.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_memcpy_x86_64.cc
index 9cef4b5454..fa20a9eb14 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_memcpy_x86_64.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_memcpy_x86_64.cc
@@ -52,8 +52,8 @@
#include <type_traits>
#include "y_absl/base/dynamic_annotations.h"
-#include "y_absl/base/internal/prefetch.h"
#include "y_absl/base/optimization.h"
+#include "y_absl/base/prefetch.h"
#include "y_absl/crc/crc32c.h"
#include "y_absl/crc/internal/cpu_detect.h"
#include "y_absl/crc/internal/crc_memcpy.h"
@@ -242,10 +242,8 @@ crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
while (copy_rounds > kBlocksPerCacheLine) {
// Prefetch kPrefetchAhead bytes ahead of each pointer.
for (size_t i = 0; i < kRegions; i++) {
- y_absl::base_internal::PrefetchT0(src_bytes + kPrefetchAhead +
- region_size * i);
- y_absl::base_internal::PrefetchT0(dst_bytes + kPrefetchAhead +
- region_size * i);
+ y_absl::PrefetchToLocalCache(src_bytes + kPrefetchAhead + region_size * i);
+ y_absl::PrefetchToLocalCache(dst_bytes + kPrefetchAhead + region_size * i);
}
// Load and store data, computing CRC on the way.
@@ -359,18 +357,18 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
case CpuType::kIntelHaswell:
case CpuType::kIntelIvybridge:
return {
- .temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new CrcNonTemporalMemcpyAVXEngine(),
+ /*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
};
// INTEL_SANDYBRIDGE performs better with SSE than AVX.
case CpuType::kIntelSandybridge:
return {
- .temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new CrcNonTemporalMemcpyEngine(),
+ /*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(),
};
default:
- return {.temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new FallbackCrcMemcpyEngine()};
+ return {/*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new FallbackCrcMemcpyEngine()};
}
#else
// Get the underlying architecture.
@@ -388,8 +386,8 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
case CpuType::kAmdRome:
case CpuType::kAmdNaples:
return {
- .temporal = new AcceleratedCrcMemcpyEngine<1, 2>(),
- .non_temporal = new CrcNonTemporalMemcpyAVXEngine(),
+ /*.temporal=*/new AcceleratedCrcMemcpyEngine<1, 2>(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
};
// PCLMULQDQ is slow and we don't have wide enough issue width to take
// advantage of it. For an unknown architecture, don't risk using CLMULs.
@@ -400,18 +398,18 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
case CpuType::kIntelHaswell:
case CpuType::kIntelIvybridge:
return {
- .temporal = new AcceleratedCrcMemcpyEngine<3, 0>(),
- .non_temporal = new CrcNonTemporalMemcpyAVXEngine(),
+ /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
};
// INTEL_SANDYBRIDGE performs better with SSE than AVX.
case CpuType::kIntelSandybridge:
return {
- .temporal = new AcceleratedCrcMemcpyEngine<3, 0>(),
- .non_temporal = new CrcNonTemporalMemcpyEngine(),
+ /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(),
};
default:
- return {.temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new FallbackCrcMemcpyEngine()};
+ return {/*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new FallbackCrcMemcpyEngine()};
}
#endif // UNDEFINED_BEHAVIOR_SANITIZER
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_x86_arm_combined.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_x86_arm_combined.cc
index 96e33d8f56..4ab374da9c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_x86_arm_combined.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/crc/internal/crc_x86_arm_combined.cc
@@ -21,7 +21,7 @@
#include "y_absl/base/config.h"
#include "y_absl/base/dynamic_annotations.h"
#include "y_absl/base/internal/endian.h"
-#include "y_absl/base/internal/prefetch.h"
+#include "y_absl/base/prefetch.h"
#include "y_absl/crc/internal/cpu_detect.h"
#include "y_absl/crc/internal/crc.h"
#include "y_absl/crc/internal/crc32_x86_arm_combined_simd.h"
@@ -429,11 +429,11 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
Y_ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
Y_ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
Y_ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(p + kPrefetchHorizonMedium));
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(p1 + kPrefetchHorizonMedium));
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(p2 + kPrefetchHorizonMedium));
}
// Don't run crc on last 8 bytes.
@@ -515,14 +515,14 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
}
for (size_t i = 1; i < bs; i++) {
- // Prefetch data for next itterations.
+ // Prefetch data for next iterations.
for (size_t j = 0; j < num_crc_streams; j++) {
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(crc_streams[j] + kPrefetchHorizon));
}
for (size_t j = 0; j < num_pclmul_streams; j++) {
- base_internal::PrefetchT0(reinterpret_cast<const char*>(
- pclmul_streams[j] + kPrefetchHorizon));
+ PrefetchToLocalCache(reinterpret_cast<const char*>(pclmul_streams[j] +
+ kPrefetchHorizon));
}
// We process each stream in 64 byte blocks. This can be written as
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
index 25ed376e0f..ab8b62dda2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.cc
@@ -31,6 +31,13 @@
#ifdef Y_ABSL_HAVE_MMAP
#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
+
+#ifdef __linux__
+#include <sys/prctl.h>
#endif
#include <algorithm>
@@ -77,10 +84,10 @@ struct FailureSignalData {
struct sigaction previous_action;
// StructSigaction is used to silence -Wmissing-field-initializers.
using StructSigaction = struct sigaction;
- #define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
+#define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
#else
void (*previous_handler)(int);
- #define FSD_PREVIOUS_INIT SIG_DFL
+#define FSD_PREVIOUS_INIT SIG_DFL
#endif
};
@@ -132,7 +139,7 @@ const char* FailureSignalToString(int signo) {
#ifdef Y_ABSL_HAVE_SIGALTSTACK
static bool SetupAlternateStackOnce() {
-#if defined(__wasm__) || defined (__asjms__)
+#if defined(__wasm__) || defined(__asmjs__)
const size_t page_mask = getpagesize() - 1;
#else
const size_t page_mask = static_cast<size_t>(sysconf(_SC_PAGESIZE)) - 1;
@@ -154,9 +161,6 @@ static bool SetupAlternateStackOnce() {
#ifndef MAP_STACK
#define MAP_STACK 0
#endif
-#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
sigstk.ss_sp = mmap(nullptr, sigstk.ss_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (sigstk.ss_sp == MAP_FAILED) {
@@ -172,6 +176,20 @@ static bool SetupAlternateStackOnce() {
if (sigaltstack(&sigstk, nullptr) != 0) {
Y_ABSL_RAW_LOG(FATAL, "sigaltstack() failed with errno=%d", errno);
}
+
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+ // Make a best-effort attempt to name the allocated region in
+ // /proc/$PID/smaps.
+ //
+ // The call to prctl() may fail if the kernel was not configured with the
+ // CONFIG_ANON_VMA_NAME kernel option. This is OK since the call is
+ // primarily a debugging aid.
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, sigstk.ss_sp, sigstk.ss_size,
+ "y_absl-signalstack");
+#endif
+#endif // __linux__
+
return true;
}
@@ -218,10 +236,6 @@ static void InstallOneFailureHandler(FailureSignalData* data,
#endif
-static void WriteToStderr(const char* data) {
- y_absl::raw_log_internal::AsyncSignalSafeWriteToStderr(data, strlen(data));
-}
-
static void WriteSignalMessage(int signo, int cpu,
void (*writerfn)(const char*)) {
char buf[96];
@@ -234,7 +248,7 @@ static void WriteSignalMessage(int signo, int cpu,
if (signal_string != nullptr && signal_string[0] != '\0') {
snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
signal_string,
- static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
+ static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
on_cpu);
} else {
snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
@@ -297,7 +311,8 @@ static void PortableSleepForSeconds(int seconds) {
struct timespec sleep_time;
sleep_time.tv_sec = seconds;
sleep_time.tv_nsec = 0;
- while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {}
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
+ }
#endif
}
@@ -307,9 +322,7 @@ static void PortableSleepForSeconds(int seconds) {
// set amount of time. If AbslFailureSignalHandler() hangs for more than
// the alarm timeout, ImmediateAbortSignalHandler() will abort the
// program.
-static void ImmediateAbortSignalHandler(int) {
- RaiseToDefaultHandler(SIGABRT);
-}
+static void ImmediateAbortSignalHandler(int) { RaiseToDefaultHandler(SIGABRT); }
#endif
// y_absl::base_internal::GetTID() returns pid_t on most platforms, but
@@ -362,7 +375,10 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
#endif
// First write to stderr.
- WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
+ WriteFailureInfo(
+ signo, ucontext, my_cpu, +[](const char* data) {
+ y_absl::raw_log_internal::AsyncSignalSafeWriteError(data, strlen(data));
+ });
// Riskier code (because it is less likely to be async-signal-safe)
// goes after this point.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h
index b426264276..03ce76be42 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler.h
@@ -62,7 +62,7 @@ struct FailureSignalHandlerOptions {
// If true, try to run signal handlers on an alternate stack (if supported on
// the given platform). An alternate stack is useful for program crashes due
  // to a stack overflow; by running on an alternate stack, the signal handler
- // may run even when normal stack space has been exausted. The downside of
+ // may run even when normal stack space has been exhausted. The downside of
// using an alternate stack is that extra memory for the alternate stack needs
// to be pre-allocated.
bool use_alternate_stack = true;
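
In practice this option is consumed via `InstallFailureSignalHandler()`, declared in this same header. A typical setup, sketched with the default spelled out:

    y_absl::FailureSignalHandlerOptions options;
    options.use_alternate_stack = true;  // the default documented above
    y_absl::InstallFailureSignalHandler(options);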
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
index a7dde97206..9c5e588fbb 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/elf_mem_image.h
@@ -33,7 +33,8 @@
#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
!defined(__native_client__) && !defined(__asmjs__) && \
- !defined(__wasm__) && !defined(__HAIKU__)
+ !defined(__wasm__) && !defined(__HAIKU__) && !defined(__sun) && \
+ !defined(__VXWORKS__) && !defined(__hexagon__)
#define Y_ABSL_HAVE_ELF_MEM_IMAGE 1
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
index 62d600f5a8..284610b5d7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/examine_stack.cc
@@ -24,6 +24,9 @@
#ifdef Y_ABSL_HAVE_MMAP
#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
#endif
#if defined(__linux__) || defined(__APPLE__)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
index b9a7919222..04531613f8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -13,6 +13,7 @@
#include <cassert>
#include <cstdint>
#include <iostream>
+#include <limits>
#include "y_absl/base/attributes.h"
#include "y_absl/debugging/internal/address_is_readable.h"
@@ -20,6 +21,10 @@
#include "y_absl/debugging/stacktrace.h"
static const size_t kUnknownFrameSize = 0;
+// Stack end to use when we don't know the actual stack end
+// (effectively just the end of address space).
+constexpr uintptr_t kUnknownStackEnd =
+ std::numeric_limits<size_t>::max() - sizeof(void *);
#if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
@@ -79,8 +84,9 @@ static inline size_t ComputeStackFrameSize(const T* low,
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
-Y_ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
-static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+Y_ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_frame_pointer, const void *uc,
+ size_t stack_low, size_t stack_high) {
void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
bool check_frame_size = true;
@@ -94,16 +100,21 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
void **const pre_signal_frame_pointer =
reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
+ // The most recent signal always needs special handling to find the frame
+ // pointer, but a nested signal does not. If pre_signal_frame_pointer is
+ // earlier in the stack than the old_frame_pointer, then use it. If it is
+ // later, then we have already unwound through it and it needs no special
+ // handling.
+ if (pre_signal_frame_pointer >= old_frame_pointer) {
+ new_frame_pointer = pre_signal_frame_pointer;
+ }
// Check that alleged frame pointer is actually readable. This is to
// prevent "double fault" in case we hit the first fault due to e.g.
// stack corruption.
if (!y_absl::debugging_internal::AddressIsReadable(
- pre_signal_frame_pointer))
+ new_frame_pointer))
return nullptr;
- // Alleged frame pointer is readable, use it for further unwinding.
- new_frame_pointer = pre_signal_frame_pointer;
-
    // Skip the frame size check if we return from a signal. We may be using
    // an alternate stack for signals.
check_frame_size = false;
@@ -121,8 +132,26 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
const size_t frame_size =
ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
- if (frame_size == kUnknownFrameSize || frame_size > max_size)
- return nullptr;
+ if (frame_size == kUnknownFrameSize)
+ return nullptr;
+ // A very large frame may mean corrupt memory or an erroneous frame
+ // pointer. But also maybe just a plain-old large frame. Assume that if the
+ // frame is within the known stack, then it is valid.
+ if (frame_size > max_size) {
+ if (stack_high < kUnknownStackEnd &&
+ static_cast<size_t>(getpagesize()) < stack_low) {
+ const uintptr_t new_fp_u =
+ reinterpret_cast<uintptr_t>(new_frame_pointer);
+ // Stack bounds are known.
+ if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+ // new_frame_pointer is not within the known stack.
+ return nullptr;
+ }
+ } else {
+ // Stack bounds are unknown, prefer truncated stack to possible crash.
+ return nullptr;
+ }
+ }
}
return new_frame_pointer;
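
Condensed, the acceptance rule the hunk above implements: an oversized frame is kept only when the stack bounds are actually known and the candidate frame pointer lies inside them. As a sketch:

    // Equivalent predicate for accepting new_frame_pointer:
    const bool bounds_known =
        stack_high < kUnknownStackEnd &&
        static_cast<size_t>(getpagesize()) < stack_low;
    const bool accept = frame_size <= max_size ||
                        (bounds_known &&
                         stack_low < new_fp_u && new_fp_u <= stack_high);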
@@ -138,42 +167,49 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
#else
# error reading stack pointer not yet supported on this platform.
#endif
-
skip_count++; // Skip the frame for this function.
int n = 0;
+ // Assume that the first page is not stack.
+ size_t stack_low = static_cast<size_t>(getpagesize());
+ size_t stack_high = kUnknownStackEnd;
+
// The frame pointer points to low address of a frame. The first 64-bit
// word of a frame points to the next frame up the call chain, which normally
// is just after the high address of the current frame. The second word of
- // a frame contains return adress of to the caller. To find a pc value
+  // a frame contains the return address of the caller. To find a pc value
// associated with the current frame, we need to go down a level in the call
  // chain. So we remember the return address of the last frame seen. This
  // does not work for the first stack frame, which belongs to UnwindImpl(), but
  // we skip the frame for UnwindImpl() anyway.
void* prev_return_address = nullptr;
+  // The nth frame size is the difference between the nth frame pointer and
+  // the frame pointer below it in the call chain. There is no frame below the
+ // leaf frame, but this function is the leaf anyway, and we skip it.
+ void** prev_frame_pointer = nullptr;
- while (frame_pointer && n < max_depth) {
- // The y_absl::GetStackFrames routine is called when we are in some
- // informational context (the failure signal handler for example).
- // Use the non-strict unwinding rules to produce a stack trace
- // that is as complete as possible (even if it contains a few bogus
- // entries in some rare cases).
- void **next_frame_pointer =
- NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
-
+ while (frame_pointer && n < max_depth) {
if (skip_count > 0) {
skip_count--;
} else {
result[n] = prev_return_address;
if (IS_STACK_FRAMES) {
sizes[n] = static_cast<int>(
- ComputeStackFrameSize(frame_pointer, next_frame_pointer));
+ ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
}
n++;
}
prev_return_address = frame_pointer[1];
- frame_pointer = next_frame_pointer;
+ prev_frame_pointer = frame_pointer;
+ // The y_absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+ frame_pointer, ucp, stack_low, stack_high);
}
+
if (min_dropped_frames != nullptr) {
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
@@ -185,8 +221,8 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
} else {
num_dropped_frames++;
}
- frame_pointer =
- NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+ frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+ frame_pointer, ucp, stack_low, stack_high);
}
*min_dropped_frames = num_dropped_frames;
}
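
The extra `stack_low`/`stack_high` parameters are threaded through internally; nothing changes for callers of the public API:

    // Capturing a trace looks the same as before from the caller's side.
    void* frames[64];
    const int depth =
        y_absl::GetStackTrace(frames, /*max_depth=*/64, /*skip_count=*/1);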
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
index 56dcd40c67..9edbe28c84 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -57,7 +57,7 @@ static inline void *StacktracePowerPCGetLR(void **sp) {
// This check is in case the compiler doesn't define _CALL_SYSV.
return *(sp+1);
#else
-#error Need to specify the PPC ABI for your archiecture.
+#error Need to specify the PPC ABI for your architecture.
#endif
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
index 278a0b6da4..3ee3502b19 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/stacktrace_x86-inl.inc
@@ -40,7 +40,7 @@ using y_absl::debugging_internal::AddressIsReadable;
#if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
-// preceeding "syscall" or "sysenter".
+// preceding "syscall" or "sysenter".
// If __kernel_vsyscall uses frame pointer, answer 0.
//
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h
index c404b3cf91..90a78d5317 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/internal/symbolize.h
@@ -115,7 +115,7 @@ bool RemoveSymbolDecorator(int ticket);
// Remove all installed decorators. Returns true if successful, false if
// symbolization is currently in progress.
-bool RemoveAllSymbolDecorators(void);
+bool RemoveAllSymbolDecorators();
// Registers an address range to a file mapping.
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
index 1cddc78cc8..323d50604c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/leak_check.cc
@@ -65,8 +65,8 @@ bool LeakCheckerIsActive() { return false; }
void DoIgnoreLeak(const void*) { }
void RegisterLivePointers(const void*, size_t) { }
void UnRegisterLivePointers(const void*, size_t) { }
-LeakCheckDisabler::LeakCheckDisabler() { }
-LeakCheckDisabler::~LeakCheckDisabler() { }
+LeakCheckDisabler::LeakCheckDisabler() = default;
+LeakCheckDisabler::~LeakCheckDisabler() = default;
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
index 33ad4a9b0c..81e31e01c8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_elf.inc
@@ -532,6 +532,11 @@ bool ForEachSection(int fd,
return false;
}
+ // Technically it can be larger, but in practice this never happens.
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+ return false;
+ }
+
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
elf_header.e_shentsize * elf_header.e_shstrndx;
@@ -584,6 +589,11 @@ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
return false;
}
+ // Technically it can be larger, but in practice this never happens.
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+ return false;
+ }
+
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
elf_header.e_shentsize * elf_header.e_shstrndx;
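
The reason the mismatch check matters: each section header is read with a fixed-size `ElfW(Shdr)` read, while offsets are computed with an `e_shentsize` stride, as in the line above. If the two disagreed, every header after the first would be read from a drifted offset:

    // Illustrative offset computation for section header i: the guard above
    // ensures this stride equals the sizeof(ElfW(Shdr)) bytes actually read.
    const off_t shdr_offset = static_cast<off_t>(elf_header.e_shoff) +
                              static_cast<off_t>(i) * elf_header.e_shentsize;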
@@ -648,8 +658,10 @@ static bool ShouldPickFirstSymbol(const ElfW(Sym) & symbol1,
}
// Return true if an address is inside a section.
-static bool InSection(const void *address, const ElfW(Shdr) * section) {
- const char *start = reinterpret_cast<const char *>(section->sh_addr);
+static bool InSection(const void *address, ptrdiff_t relocation,
+ const ElfW(Shdr) * section) {
+ const char *start = reinterpret_cast<const char *>(
+ section->sh_addr + static_cast<ElfW(Addr)>(relocation));
size_t size = static_cast<size_t>(section->sh_size);
return start <= address && address < (start + size);
}
@@ -689,8 +701,8 @@ static Y_ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
// starting address. However, we do not always want to use the real
// starting address because we sometimes want to symbolize a function
// pointer into the .opd section, e.g. FindSymbol(&foo,...).
- const bool pc_in_opd =
- kPlatformUsesOPDSections && opd != nullptr && InSection(pc, opd);
+ const bool pc_in_opd = kPlatformUsesOPDSections && opd != nullptr &&
+ InSection(pc, relocation, opd);
const bool deref_function_descriptor_pointer =
kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd;
@@ -730,7 +742,7 @@ static Y_ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
#endif
if (deref_function_descriptor_pointer &&
- InSection(original_start_address, opd)) {
+ InSection(original_start_address, /*relocation=*/0, opd)) {
// The opd section is mapped into memory. Just dereference
// start_address to get the first double word, which points to the
// function entry.
@@ -1326,7 +1338,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
const int phnum = obj->elf_header.e_phnum;
const int phentsize = obj->elf_header.e_phentsize;
auto phoff = static_cast<off_t>(obj->elf_header.e_phoff);
- size_t num_executable_load_segments = 0;
+ size_t num_interesting_load_segments = 0;
for (int j = 0; j < phnum; j++) {
ElfW(Phdr) phdr;
if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) {
@@ -1335,23 +1347,35 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
return false;
}
phoff += phentsize;
- constexpr int rx = PF_X | PF_R;
- if (phdr.p_type != PT_LOAD || (phdr.p_flags & rx) != rx) {
- // Not a LOAD segment, or not executable code.
+
+#if defined(__powerpc__) && !(_CALL_ELF > 1)
+ // On the PowerPC ELF v1 ABI, function pointers actually point to function
+ // descriptors. These descriptors are stored in an .opd section, which is
+ // mapped read-only. We thus need to look at all readable segments, not
+ // just the executable ones.
+ constexpr int interesting = PF_R;
+#else
+ constexpr int interesting = PF_X | PF_R;
+#endif
+
+ if (phdr.p_type != PT_LOAD
+ || (phdr.p_flags & interesting) != interesting) {
+ // Not a LOAD segment, not executable code, and not a function
+ // descriptor.
continue;
}
- if (num_executable_load_segments < obj->phdr.size()) {
- memcpy(&obj->phdr[num_executable_load_segments++], &phdr, sizeof(phdr));
+ if (num_interesting_load_segments < obj->phdr.size()) {
+ memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, sizeof(phdr));
} else {
Y_ABSL_RAW_LOG(
- WARNING, "%s: too many executable LOAD segments: %zu >= %zu",
- obj->filename, num_executable_load_segments, obj->phdr.size());
+ WARNING, "%s: too many interesting LOAD segments: %zu >= %zu",
+ obj->filename, num_interesting_load_segments, obj->phdr.size());
break;
}
}
- if (num_executable_load_segments == 0) {
- // This object has no "r-x" LOAD segments. That's unexpected.
- Y_ABSL_RAW_LOG(WARNING, "%s: no executable LOAD segments", obj->filename);
+ if (num_interesting_load_segments == 0) {
+ // This object has no interesting LOAD segments. That's unexpected.
+ Y_ABSL_RAW_LOG(WARNING, "%s: no interesting LOAD segments", obj->filename);
return false;
}
}
@@ -1379,8 +1403,8 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) {
// X in the file will have a start address of [true relocation]+X.
relocation = static_cast<ptrdiff_t>(start_addr - obj->offset);
- // Note: some binaries have multiple "rx" LOAD segments. We must
- // find the right one.
+ // Note: some binaries have multiple LOAD segments that can contain
+ // function pointers. We must find the right one.
ElfW(Phdr) *phdr = nullptr;
for (size_t j = 0; j < obj->phdr.size(); j++) {
ElfW(Phdr) &p = obj->phdr[j];
@@ -1390,7 +1414,7 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) {
Y_ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type");
break;
}
- if (pc < reinterpret_cast<void *>(start_addr + p.p_memsz)) {
+ if (pc < reinterpret_cast<void *>(start_addr + p.p_vaddr + p.p_memsz)) {
phdr = &p;
break;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc
index 5aa97cca46..558160f970 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/symbolize_emscripten.inc
@@ -50,6 +50,9 @@ bool Symbolize(const void* pc, char* out, int out_size) {
if (!HaveOffsetConverter()) {
return false;
}
+ if (pc == nullptr || out_size <= 0) {
+ return false;
+ }
const char* func_name = emscripten_pc_get_function(pc);
if (func_name == nullptr) {
return false;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag.h
index 30c2bb07a5..1a6f3f5447 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/commandlineflag.h
@@ -186,7 +186,7 @@ class CommandLineFlag {
// command line.
virtual bool IsSpecifiedOnCommandLine() const = 0;
- // Validates supplied value usign validator or parseflag routine
+ // Validates supplied value using validator or parseflag routine
virtual bool ValidateInputValue(y_absl::string_view value) const = 0;
  // Checks that the flag's default value can be converted to string and back to the
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag.cc
index d9c0f4d40e..531df85799 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/commandlineflag.cc
@@ -19,7 +19,7 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-FlagStateInterface::~FlagStateInterface() {}
+FlagStateInterface::~FlagStateInterface() = default;
} // namespace flags_internal
Y_ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc
index 30fcfb0eb0..2e43977966 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.cc
@@ -197,7 +197,7 @@ void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id,
FlagFastTypeId lhs_type_id = flags_internal::FastTypeId(op_);
// `rhs_type_id` is the fast type id corresponding to the declaration
- // visibile at the call site. `lhs_type_id` is the fast type id
+ // visible at the call site. `lhs_type_id` is the fast type id
// corresponding to the type specified in flag definition. They must match
// for this operation to be well-defined.
if (Y_ABSL_PREDICT_TRUE(lhs_type_id == rhs_type_id)) return;
@@ -238,7 +238,7 @@ void FlagImpl::StoreValue(const void* src) {
switch (ValueStorageKind()) {
case FlagValueStorageKind::kValueAndInitBit:
case FlagValueStorageKind::kOneWordAtomic: {
- // Load the current value to avoid setting 'init' bit manualy.
+ // Load the current value to avoid setting 'init' bit manually.
int64_t one_word_val = OneWordValue().load(std::memory_order_acquire);
std::memcpy(&one_word_val, src, Sizeof(op_));
OneWordValue().store(one_word_val, std::memory_order_release);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h
index bce5e32656..6e3e99dbe1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag.h
@@ -121,7 +121,7 @@ inline void* Clone(FlagOpFn op, const void* obj) {
flags_internal::CopyConstruct(op, obj, res);
return res;
}
-// Returns true if parsing of input text is successfull.
+// Returns true if parsing of input text is successful.
inline bool Parse(FlagOpFn op, y_absl::string_view text, void* dst,
TString* error) {
return op(FlagOp::kParse, &text, dst, error) != nullptr;
@@ -139,12 +139,12 @@ inline size_t Sizeof(FlagOpFn op) {
return static_cast<size_t>(reinterpret_cast<intptr_t>(
op(FlagOp::kSizeof, nullptr, nullptr, nullptr)));
}
-// Returns fast type id coresponding to the value type.
+// Returns fast type id corresponding to the value type.
inline FlagFastTypeId FastTypeId(FlagOpFn op) {
return reinterpret_cast<FlagFastTypeId>(
op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr));
}
-// Returns fast type id coresponding to the value type.
+// Returns fast type id corresponding to the value type.
inline const std::type_info* RuntimeTypeId(FlagOpFn op) {
return reinterpret_cast<const std::type_info*>(
op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr));
@@ -223,12 +223,12 @@ extern const char kStrippedFlagHelp[];
// first overload if possible. If the help message is evaluatable in constexpr
// context, we'll be able to make a FixedCharArray out of it and choose the first
// overload. In this case the help message expression is immediately evaluated
-// and is used to construct the y_absl::Flag. No additionl code is generated by
+// and is used to construct the y_absl::Flag. No additional code is generated by
// Y_ABSL_FLAG. Otherwise SFINAE kicks in and the first overload is dropped from
// consideration, in which case the second overload will be used. The second
// overload does not attempt to evaluate the help message expression
-// immediately and instead delays the evaluation by returing the function
-// pointer (&T::NonConst) genering the help message when necessary. This is
+// immediately and instead delays the evaluation by returning the function
+// pointer (&T::NonConst) generating the help message when necessary. This is
// evaluatable in constexpr context, but the cost is an extra function being
// generated in the Y_ABSL_FLAG code.
template <typename Gen, size_t N>
@@ -308,19 +308,20 @@ constexpr int64_t UninitializedFlagValue() {
}
template <typename T>
-using FlagUseValueAndInitBitStorage = std::integral_constant<
- bool, y_absl::type_traits_internal::is_trivially_copyable<T>::value &&
- std::is_default_constructible<T>::value && (sizeof(T) < 8)>;
+using FlagUseValueAndInitBitStorage =
+ std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+ std::is_default_constructible<T>::value &&
+ (sizeof(T) < 8)>;
template <typename T>
-using FlagUseOneWordStorage = std::integral_constant<
- bool, y_absl::type_traits_internal::is_trivially_copyable<T>::value &&
- (sizeof(T) <= 8)>;
+using FlagUseOneWordStorage =
+ std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+ (sizeof(T) <= 8)>;
template <class T>
-using FlagUseSequenceLockStorage = std::integral_constant<
- bool, y_absl::type_traits_internal::is_trivially_copyable<T>::value &&
- (sizeof(T) > 8)>;
+using FlagUseSequenceLockStorage =
+ std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+ (sizeof(T) > 8)>;
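
With the move to `std::is_trivially_copyable`, the three traits partition flag value types purely by size and copyability. Illustrative checks, assuming the `flags_internal` names above:

    struct Big { int64_t lo, hi; };  // trivially copyable, 16 bytes

    static_assert(FlagUseValueAndInitBitStorage<bool>::value, "");  // < 8 bytes
    static_assert(FlagUseOneWordStorage<double>::value, "");        // <= 8 bytes
    static_assert(FlagUseSequenceLockStorage<Big>::value, "");      // > 8 bytes

Note that a small type such as `bool` satisfies both of the first two traits; the storage-kind selection checks `kValueAndInitBit` first.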
enum class FlagValueStorageKind : uint8_t {
kValueAndInitBit = 0,
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag_msvc.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag_msvc.inc
index f9beb09f90..53ac6c9183 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag_msvc.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/flag_msvc.inc
@@ -29,7 +29,7 @@
// second level of protection is a global Mutex, so if two threads attempt to
// construct the flag concurrently only one wins.
//
-// This solution is based on a recomendation here:
+// This solution is based on a recommendation here:
// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454
namespace flags_internal {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/parse.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/parse.h
index 9ef0f310af..82e4d91473 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/parse.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/parse.h
@@ -16,11 +16,14 @@
#ifndef Y_ABSL_FLAGS_INTERNAL_PARSE_H_
#define Y_ABSL_FLAGS_INTERNAL_PARSE_H_
+#include <iostream>
+#include <ostream>
#include <util/generic/string.h>
#include <vector>
#include "y_absl/base/config.h"
#include "y_absl/flags/declare.h"
+#include "y_absl/flags/internal/usage.h"
#include "y_absl/strings/string_view.h"
Y_ABSL_DECLARE_FLAG(std::vector<TString>, flagfile);
@@ -32,7 +35,6 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-enum class ArgvListAction { kRemoveParsedArgs, kKeepParsedArgs };
enum class UsageFlagsAction { kHandleUsage, kIgnoreUsage };
enum class OnUndefinedFlag {
kIgnoreUndefined,
@@ -40,10 +42,15 @@ enum class OnUndefinedFlag {
kAbortIfUndefined
};
-std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
- ArgvListAction arg_list_act,
- UsageFlagsAction usage_flag_act,
- OnUndefinedFlag on_undef_flag);
+// This is not a public interface. It exists to expose the ability to change
+// the help output stream in case of parsing errors. This is used by
+// internal unit tests to validate expected outputs.
+// When this was written, `EXPECT_EXIT` only supported matchers on stderr,
+// but not on stdout.
+std::vector<char*> ParseCommandLineImpl(
+ int argc, char* argv[], UsageFlagsAction usage_flag_action,
+ OnUndefinedFlag undef_flag_action,
+ std::ostream& error_help_output = std::cout);
// --------------------------------------------------------------------
// Inspect original command line
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.cc
index a4331c33fa..c654945141 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.cc
@@ -18,6 +18,7 @@
#include <stdint.h>
#include <algorithm>
+#include <cstdlib>
#include <functional>
#include <iterator>
#include <map>
@@ -91,8 +92,16 @@ class XMLElement {
case '>':
out << "&gt;";
break;
+ case '\n':
+ case '\v':
+ case '\f':
+ case '\t':
+ out << " ";
+ break;
default:
- out << c;
+ if (IsValidXmlCharacter(static_cast<unsigned char>(c))) {
+ out << c;
+ }
break;
}
}
@@ -101,6 +110,7 @@ class XMLElement {
}
private:
+ static bool IsValidXmlCharacter(unsigned char c) { return c >= 0x20; }
y_absl::string_view tag_;
y_absl::string_view txt_;
};
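
Net effect of the escaping change on a help string, illustratively: whitespace control characters are flattened to single spaces and any remaining byte below 0x20 is dropped, so the emitted XML stays single-line and well-formed:

    // "line1\nline2\x01!" inside a <usage> element is rendered as
    //   <usage>line1 line2!</usage>
    // '\n' becomes ' '; the 0x01 byte fails IsValidXmlCharacter() and is
    // skipped entirely.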
@@ -130,7 +140,7 @@ class FlagHelpPrettyPrinter {
for (auto line : y_absl::StrSplit(str, y_absl::ByAnyChar("\n\r"))) {
if (!tokens.empty()) {
// Keep line separators in the input string.
- tokens.push_back("\n");
+ tokens.emplace_back("\n");
}
for (auto token :
y_absl::StrSplit(line, y_absl::ByAnyChar(" \t"), y_absl::SkipEmpty())) {
@@ -354,8 +364,8 @@ void FlagsHelp(std::ostream& out, y_absl::string_view filter, HelpFormat format,
// --------------------------------------------------------------------
// Checks all the 'usage' command line flags to see if any have been set.
// If so, handles them appropriately.
-int HandleUsageFlags(std::ostream& out,
- y_absl::string_view program_usage_message) {
+HelpMode HandleUsageFlags(std::ostream& out,
+ y_absl::string_view program_usage_message) {
switch (GetFlagsHelpMode()) {
case HelpMode::kNone:
break;
@@ -363,25 +373,24 @@ int HandleUsageFlags(std::ostream& out,
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_help_flags,
GetFlagsHelpFormat(), program_usage_message);
- return 1;
+ break;
case HelpMode::kShort:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_helpshort_flags,
GetFlagsHelpFormat(), program_usage_message);
- return 1;
+ break;
case HelpMode::kFull:
flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(),
program_usage_message);
- return 1;
+ break;
case HelpMode::kPackage:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_helppackage_flags,
GetFlagsHelpFormat(), program_usage_message);
-
- return 1;
+ break;
case HelpMode::kMatch: {
TString substr = GetFlagsHelpMatchSubstr();
@@ -400,20 +409,19 @@ int HandleUsageFlags(std::ostream& out,
flags_internal::FlagsHelpImpl(
out, filter_cb, HelpFormat::kHumanReadable, program_usage_message);
}
-
- return 1;
+ break;
}
case HelpMode::kVersion:
if (flags_internal::GetUsageConfig().version_string)
out << flags_internal::GetUsageConfig().version_string();
      // Unlike help, we may be asking for version in a script, so MaybeExit()
      // exits with code 0 for this mode.
- return 0;
+ break;
case HelpMode::kOnlyCheckArgs:
- return 0;
+ break;
}
- return -1;
+ return GetFlagsHelpMode();
}
// --------------------------------------------------------------------
@@ -521,6 +529,22 @@ bool DeduceUsageFlags(y_absl::string_view name, y_absl::string_view value) {
return false;
}
+// --------------------------------------------------------------------
+
+void MaybeExit(HelpMode mode) {
+ switch (mode) {
+ case flags_internal::HelpMode::kNone:
+ return;
+ case flags_internal::HelpMode::kOnlyCheckArgs:
+ case flags_internal::HelpMode::kVersion:
+ std::exit(0);
+ default: // For all the other modes we exit with 1
+ std::exit(1);
+ }
+}
+
+// --------------------------------------------------------------------
+
} // namespace flags_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
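
Callers are now expected to pair the two helpers; the pattern used by `ParseCommandLineImpl()` (see parse.cc further below) is essentially:

    const auto help_mode =
        flags_internal::HandleUsageFlags(std::cout, ProgramUsageMessage());
    flags_internal::MaybeExit(help_mode);  // exit(0) for --version and
                                           // --only_check_args, exit(1) for
                                           // the help modes, no-op for kNone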
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.h
index cc7adf27cb..0c059f5af7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/internal/usage.h
@@ -17,11 +17,11 @@
#define Y_ABSL_FLAGS_INTERNAL_USAGE_H_
#include <iosfwd>
+#include <ostream>
#include <util/generic/string.h>
#include "y_absl/base/config.h"
#include "y_absl/flags/commandlineflag.h"
-#include "y_absl/flags/declare.h"
#include "y_absl/strings/string_view.h"
// --------------------------------------------------------------------
@@ -36,6 +36,18 @@ enum class HelpFormat {
kHumanReadable,
};
+// The kind of usage help requested.
+enum class HelpMode {
+ kNone,
+ kImportant,
+ kShort,
+ kFull,
+ kPackage,
+ kMatch,
+ kVersion,
+ kOnlyCheckArgs
+};
+
// Streams the help message describing `flag` to `out`.
// The default value for `flag` is included in the output.
void FlagHelp(std::ostream& out, const CommandLineFlag& flag,
@@ -57,28 +69,18 @@ void FlagsHelp(std::ostream& out, y_absl::string_view filter,
// If any of the 'usage' related command line flags (listed on the bottom of
// this file) has been set this routine produces corresponding help message in
-// the specified output stream and returns:
-// 0 - if "version" or "only_check_flags" flags were set and handled.
-// 1 - if some other 'usage' related flag was set and handled.
-// -1 - if no usage flags were set on a commmand line.
-// Non negative return values are expected to be used as an exit code for a
-// binary.
-int HandleUsageFlags(std::ostream& out,
- y_absl::string_view program_usage_message);
+// the specified output stream and returns the HelpMode that was handled. Otherwise
+// it returns HelpMode::kNone.
+HelpMode HandleUsageFlags(std::ostream& out,
+ y_absl::string_view program_usage_message);
// --------------------------------------------------------------------
-// Globals representing usage reporting flags
+// Encapsulates the logic of exiting the binary depending on the handled help mode.
-enum class HelpMode {
- kNone,
- kImportant,
- kShort,
- kFull,
- kPackage,
- kMatch,
- kVersion,
- kOnlyCheckArgs
-};
+void MaybeExit(HelpMode mode);
+
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
// Returns substring to filter help output (--help=substr argument)
TString GetFlagsHelpMatchSubstr();
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.cc
index 51abb779fb..d3c1a39975 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.cc
@@ -19,6 +19,7 @@
#include <cmath>
#include <limits>
+#include <sstream>
#include <util/generic/string.h>
#include <type_traits>
#include <vector>
@@ -26,6 +27,7 @@
#include "y_absl/base/config.h"
#include "y_absl/base/log_severity.h"
#include "y_absl/base/macros.h"
+#include "y_absl/numeric/int128.h"
#include "y_absl/strings/ascii.h"
#include "y_absl/strings/match.h"
#include "y_absl/strings/numbers.h"
@@ -68,8 +70,10 @@ bool AbslParseFlag(y_absl::string_view text, bool* dst, TString*) {
// puts us in base 16. But leading 0 does not put us in base 8. It
// caused too many bugs when we had that behavior.
static int NumericBase(y_absl::string_view text) {
- const bool hex = (text.size() >= 2 && text[0] == '0' &&
- (text[1] == 'x' || text[1] == 'X'));
+ if (text.empty()) return 0;
+ size_t num_start = (text[0] == '-' || text[0] == '+') ? 1 : 0;
+ const bool hex = (text.size() >= num_start + 2 && text[num_start] == '0' &&
+ (text[num_start + 1] == 'x' || text[num_start + 1] == 'X'));
return hex ? 16 : 10;
}
@@ -125,6 +129,32 @@ bool AbslParseFlag(y_absl::string_view text, unsigned long long* dst,
return ParseFlagImpl(text, *dst);
}
+bool AbslParseFlag(y_absl::string_view text, y_absl::int128* dst, TString*) {
+ text = y_absl::StripAsciiWhitespace(text);
+
+ // check hex
+ int base = NumericBase(text);
+ if (!y_absl::numbers_internal::safe_strto128_base(text, dst, base)) {
+ return false;
+ }
+
+ return base == 16 ? y_absl::SimpleHexAtoi(text, dst)
+ : y_absl::SimpleAtoi(text, dst);
+}
+
+bool AbslParseFlag(y_absl::string_view text, y_absl::uint128* dst, TString*) {
+ text = y_absl::StripAsciiWhitespace(text);
+
+ // check hex
+ int base = NumericBase(text);
+ if (!y_absl::numbers_internal::safe_strtou128_base(text, dst, base)) {
+ return false;
+ }
+
+ return base == 16 ? y_absl::SimpleHexAtoi(text, dst)
+ : y_absl::SimpleAtoi(text, dst);
+}
+
// --------------------------------------------------------------------
// AbslParseFlag for floating point types.
@@ -171,6 +201,17 @@ TString Unparse(long v) { return y_absl::StrCat(v); }
TString Unparse(unsigned long v) { return y_absl::StrCat(v); }
TString Unparse(long long v) { return y_absl::StrCat(v); }
TString Unparse(unsigned long long v) { return y_absl::StrCat(v); }
+TString Unparse(y_absl::int128 v) {
+ std::stringstream ss;
+ ss << v;
+ return ss.str();
+}
+TString Unparse(y_absl::uint128 v) {
+ std::stringstream ss;
+ ss << v;
+ return ss.str();
+}
+
template <typename T>
TString UnparseFloatingPointVal(T v) {
// digits10 is guaranteed to roundtrip correctly in string -> value -> string
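
With the parser and `Unparse()` overloads in place, 128-bit flags work end to end; a small sketch (the flag itself is hypothetical):

    // Hypothetical flag. Hex values parse because NumericBase() now skips a
    // leading sign before looking for the "0x" prefix.
    Y_ABSL_FLAG(y_absl::uint128, shard_mask, 0,
                "bitmask selecting up to 128 shards");
    // e.g. --shard_mask=0xffff, or --some_offset=-0x10 for a signed flag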
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h
index 237b47a7eb..c6ed3a5f72 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/marshalling.h
@@ -200,6 +200,7 @@
#define Y_ABSL_FLAGS_MARSHALLING_H_
#include "y_absl/base/config.h"
+#include "y_absl/numeric/int128.h"
#if defined(Y_ABSL_HAVE_STD_OPTIONAL) && !defined(Y_ABSL_USES_STD_OPTIONAL)
#include <optional>
@@ -233,6 +234,8 @@ bool AbslParseFlag(y_absl::string_view, unsigned long*, TString*); // NOLINT
bool AbslParseFlag(y_absl::string_view, long long*, TString*); // NOLINT
bool AbslParseFlag(y_absl::string_view, unsigned long long*, // NOLINT
TString*);
+bool AbslParseFlag(y_absl::string_view, y_absl::int128*, TString*); // NOLINT
+bool AbslParseFlag(y_absl::string_view, y_absl::uint128*, TString*); // NOLINT
bool AbslParseFlag(y_absl::string_view, float*, TString*);
bool AbslParseFlag(y_absl::string_view, double*, TString*);
bool AbslParseFlag(y_absl::string_view, TString*, TString*);
@@ -310,6 +313,8 @@ TString Unparse(long v); // NOLINT
TString Unparse(unsigned long v); // NOLINT
TString Unparse(long long v); // NOLINT
TString Unparse(unsigned long long v); // NOLINT
+TString Unparse(y_absl::int128 v);
+TString Unparse(y_absl::uint128 v);
TString Unparse(float v);
TString Unparse(double v);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.cc
index 9eea65e254..4365d0952a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.cc
@@ -19,9 +19,10 @@
#include <algorithm>
#include <cstdint>
+#include <cstdlib>
#include <fstream>
#include <iostream>
-#include <iterator>
+#include <ostream>
#include <util/generic/string.h>
#include <tuple>
#include <utility>
@@ -98,6 +99,8 @@ struct SpecifiedFlagsCompare {
Y_ABSL_NAMESPACE_END
} // namespace y_absl
+// These flags influence how command line flags are parsed and are only intended
+// to be set on the command line. Avoid reading or setting them from C++ code.
Y_ABSL_FLAG(std::vector<TString>, flagfile, {},
"comma-separated list of files to load flags from")
.OnUpdate([]() {
@@ -147,6 +150,8 @@ Y_ABSL_FLAG(std::vector<TString>, tryfromenv, {},
y_absl::flags_internal::tryfromenv_needs_processing = true;
});
+// Rather than reading or setting --undefok from C++ code, please consider using
+// Y_ABSL_RETIRED_FLAG instead.
Y_ABSL_FLAG(std::vector<TString>, undefok, {},
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
@@ -190,7 +195,7 @@ bool ArgsList::ReadFromFlagfile(const TString& flag_file_name) {
// This argument represents fake argv[0], which should be present in all arg
// lists.
- args_.push_back("");
+ args_.emplace_back("");
std::string line;
bool success = true;
@@ -212,7 +217,7 @@ bool ArgsList::ReadFromFlagfile(const TString& flag_file_name) {
break;
}
- args_.push_back(TString(stripped));
+ args_.emplace_back(stripped);
continue;
}
@@ -278,7 +283,7 @@ std::tuple<y_absl::string_view, y_absl::string_view, bool> SplitNameAndValue(
return std::make_tuple("", "", false);
}
- auto equal_sign_pos = arg.find("=");
+ auto equal_sign_pos = arg.find('=');
y_absl::string_view flag_name = arg.substr(0, equal_sign_pos);
@@ -367,7 +372,7 @@ bool ReadFlagsFromEnv(const std::vector<TString>& flag_names,
// This argument represents fake argv[0], which should be present in all arg
// lists.
- args.push_back("");
+ args.emplace_back("");
for (const auto& flag_name : flag_names) {
// Avoid infinite recursion.
@@ -416,7 +421,7 @@ bool HandleGeneratorFlags(std::vector<ArgsList>& input_args,
// programmatically before invoking ParseCommandLine. Note that we do not
// actually process arguments specified in the flagfile, but instead
// create a secondary arguments list to be processed along with the rest
- // of the comamnd line arguments. Since we always the process most recently
+  // of the command line arguments. Since we always process the most recently
  // created list of arguments first, this will result in the flagfile argument
// being processed before any other argument in the command line. If
// FLAGS_flagfile contains more than one file name we create multiple new
@@ -599,6 +604,34 @@ bool CanIgnoreUndefinedFlag(y_absl::string_view flag_name) {
return false;
}
+// --------------------------------------------------------------------
+
+void ReportUnrecognizedFlags(
+ const std::vector<UnrecognizedFlag>& unrecognized_flags,
+ bool report_as_fatal_error) {
+ for (const auto& unrecognized : unrecognized_flags) {
+ // Verify if flag_name has the "no" already removed
+ std::vector<TString> misspelling_hints;
+ if (unrecognized.source == UnrecognizedFlag::kFromArgv) {
+ misspelling_hints =
+ flags_internal::GetMisspellingHints(unrecognized.flag_name);
+ }
+
+ if (misspelling_hints.empty()) {
+ flags_internal::ReportUsageError(
+ y_absl::StrCat("Unknown command line flag '", unrecognized.flag_name,
+ "'"),
+ report_as_fatal_error);
+ } else {
+ flags_internal::ReportUsageError(
+ y_absl::StrCat("Unknown command line flag '", unrecognized.flag_name,
+ "'. Did you mean: ",
+ y_absl::StrJoin(misspelling_hints, ", "), " ?"),
+ report_as_fatal_error);
+ }
+ }
+}
+
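
For an argv-sourced typo this produces a diagnostic along these lines (output illustrative):

    ERROR: Unknown command line flag 'verbos'. Did you mean: verbose ?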
} // namespace
// --------------------------------------------------------------------
@@ -638,7 +671,7 @@ std::vector<TString> GetMisspellingHints(const y_absl::string_view flag) {
const size_t maxCutoff = std::min(flag.size() / 2 + 1, kMaxDistance);
auto undefok = y_absl::GetFlag(FLAGS_undefok);
BestHints best_hints(static_cast<uint8_t>(maxCutoff));
- y_absl::flags_internal::ForEachFlag([&](const CommandLineFlag& f) {
+ flags_internal::ForEachFlag([&](const CommandLineFlag& f) {
if (best_hints.hints.size() >= kMaxHints) return;
uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance(
flag, f.Name(), best_hints.best_distance);
@@ -664,59 +697,94 @@ std::vector<TString> GetMisspellingHints(const y_absl::string_view flag) {
// --------------------------------------------------------------------
std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
- ArgvListAction arg_list_act,
- UsageFlagsAction usage_flag_act,
- OnUndefinedFlag on_undef_flag) {
- Y_ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]");
+ UsageFlagsAction usage_flag_action,
+ OnUndefinedFlag undef_flag_action,
+ std::ostream& error_help_output) {
+ std::vector<char*> positional_args;
+ std::vector<UnrecognizedFlag> unrecognized_flags;
- // Once parsing has started we will not have more flag registrations.
- // If we did, they would be missing during parsing, which is a problem on
- // itself.
- flags_internal::FinalizeRegistry();
+ auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
+ argc, argv, positional_args, unrecognized_flags, usage_flag_action);
- // This routine does not return anything since we abort on failure.
- CheckDefaultValuesParsingRoundtrip();
+ if (undef_flag_action != OnUndefinedFlag::kIgnoreUndefined) {
+ flags_internal::ReportUnrecognizedFlags(
+ unrecognized_flags,
+ (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined));
- std::vector<TString> flagfile_value;
+ if (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined) {
+ if (!unrecognized_flags.empty()) {
+        flags_internal::HandleUsageFlags(error_help_output,
+                                         ProgramUsageMessage());
+        std::exit(1);
+ }
+ }
+ }
+
+ flags_internal::MaybeExit(help_mode);
+
+ return positional_args;
+}
+
+// --------------------------------------------------------------------
+// This function handles all Abseil Flags and built-in usage flags and, if any
+// help mode was handled, it returns that help mode. The caller of this function
+// can decide to exit based on the returned help mode.
+// The caller may decide to handle the remaining positional arguments and the
+// unrecognized flags before exiting.
+//
+// Returns:
+// * HelpMode::kFull if parsing errors were detected in recognized arguments
+//  * The HelpMode that was handled when `usage_flag_action` is
+//    UsageFlagsAction::kHandleUsage and a usage flag was specified on the
+//    command line
+// * Otherwise it returns HelpMode::kNone
+HelpMode ParseAbseilFlagsOnlyImpl(
+ int argc, char* argv[], std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags,
+ UsageFlagsAction usage_flag_action) {
+ Y_ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]");
+
+ using flags_internal::ArgsList;
+ using flags_internal::specified_flags;
+
+ std::vector<TString> flagfile_value;
std::vector<ArgsList> input_args;
- input_args.push_back(ArgsList(argc, argv));
- std::vector<char*> output_args;
- std::vector<char*> positional_args;
- output_args.reserve(static_cast<size_t>(argc));
+ // Once parsing has started we will not allow more flag registrations.
+ flags_internal::FinalizeRegistry();
+
+ // This routine does not return anything since we abort on failure.
+ flags_internal::CheckDefaultValuesParsingRoundtrip();
- // This is the list of undefined flags. The element of the list is the pair
- // consisting of boolean indicating if flag came from command line (vs from
- // some flag file we've read) and flag name.
- // TODO(rogeeff): Eliminate the first element in the pair after cleanup.
- std::vector<std::pair<bool, TString>> undefined_flag_names;
+ input_args.push_back(ArgsList(argc, argv));
// Set program invocation name if it is not set before.
- if (ProgramInvocationName() == "UNKNOWN") {
+ if (flags_internal::ProgramInvocationName() == "UNKNOWN") {
flags_internal::SetProgramInvocationName(argv[0]);
}
- output_args.push_back(argv[0]);
+ positional_args.push_back(argv[0]);
- y_absl::MutexLock l(&specified_flags_guard);
+ y_absl::MutexLock l(&flags_internal::specified_flags_guard);
if (specified_flags == nullptr) {
specified_flags = new std::vector<const CommandLineFlag*>;
} else {
specified_flags->clear();
}
- // Iterate through the list of the input arguments. First level are arguments
- // originated from argc/argv. Following levels are arguments originated from
- // recursive parsing of flagfile(s).
+ // Iterate through the list of the input arguments. First level are
+ // arguments originated from argc/argv. Following levels are arguments
+ // originated from recursive parsing of flagfile(s).
bool success = true;
while (!input_args.empty()) {
- // 10. First we process the built-in generator flags.
- success &= HandleGeneratorFlags(input_args, flagfile_value);
+ // First we process the built-in generator flags.
+ success &= flags_internal::HandleGeneratorFlags(input_args, flagfile_value);
- // 30. Select top-most (most recent) arguments list. If it is empty drop it
+ // Select top-most (most recent) arguments list. If it is empty drop it
// and re-try.
ArgsList& curr_list = input_args.back();
+    // Every ArgsList starts with a real or fake program name, so we can always
+ // start by skipping it.
curr_list.PopFront();
if (curr_list.Size() == 0) {
@@ -724,13 +792,13 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
continue;
}
- // 40. Pick up the front remaining argument in the current list. If current
- // stack of argument lists contains only one element - we are processing an
- // argument from the original argv.
+ // Handle the next argument in the current list. If the stack of argument
+ // lists contains only one element - we are processing an argument from
+ // the original argv.
y_absl::string_view arg(curr_list.Front());
bool arg_from_argv = input_args.size() == 1;
- // 50. If argument does not start with - or is just "-" - this is
+    // If the argument does not start with '-' or is just "-", this is a
// positional argument.
if (!y_absl::ConsumePrefix(&arg, "-") || arg.empty()) {
Y_ABSL_INTERNAL_CHECK(arg_from_argv,
@@ -740,12 +808,8 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
continue;
}
- if (arg_from_argv && (arg_list_act == ArgvListAction::kKeepParsedArgs)) {
- output_args.push_back(argv[curr_list.FrontIndex()]);
- }
-
- // 60. Split the current argument on '=' to figure out the argument
- // name and value. If flag name is empty it means we've got "--". value
+ // Split the current argument on '=' to deduce the argument flag name and
+    // value. If the flag name is empty it means we've got a "--" argument. Value
    // can be empty either if there was no '=' in the argument string at all or
    // the argument looked like "--foo=". In the latter case is_empty_value is
// true.
@@ -753,10 +817,11 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
y_absl::string_view value;
bool is_empty_value = false;
- std::tie(flag_name, value, is_empty_value) = SplitNameAndValue(arg);
+ std::tie(flag_name, value, is_empty_value) =
+ flags_internal::SplitNameAndValue(arg);
- // 70. "--" alone means what it does for GNU: stop flags parsing. We do
- // not support positional arguments in flagfiles, so we just drop them.
+ // Standalone "--" argument indicates that the rest of the arguments are
+ // positional. We do not support positional arguments in flagfiles.
if (flag_name.empty()) {
Y_ABSL_INTERNAL_CHECK(arg_from_argv,
"Flagfile cannot contain positional argument");
@@ -765,43 +830,36 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
break;
}
- // 80. Locate the flag based on flag name. Handle both --foo and --nofoo
+ // Locate the flag based on flag name. Handle both --foo and --nofoo.
CommandLineFlag* flag = nullptr;
bool is_negative = false;
- std::tie(flag, is_negative) = LocateFlag(flag_name);
+ std::tie(flag, is_negative) = flags_internal::LocateFlag(flag_name);
if (flag == nullptr) {
// Usage flags are not modeled as Abseil flags. Locate them separately.
if (flags_internal::DeduceUsageFlags(flag_name, value)) {
continue;
}
-
- if (on_undef_flag != OnUndefinedFlag::kIgnoreUndefined) {
- undefined_flag_names.emplace_back(arg_from_argv,
- TString(flag_name));
- }
+ unrecognized_flags.emplace_back(arg_from_argv
+ ? UnrecognizedFlag::kFromArgv
+ : UnrecognizedFlag::kFromFlagfile,
+ flag_name);
continue;
}
- // 90. Deduce flag's value (from this or next argument)
- auto curr_index = curr_list.FrontIndex();
+ // Deduce flag's value (from this or next argument).
bool value_success = true;
- std::tie(value_success, value) =
- DeduceFlagValue(*flag, value, is_negative, is_empty_value, &curr_list);
+ std::tie(value_success, value) = flags_internal::DeduceFlagValue(
+ *flag, value, is_negative, is_empty_value, &curr_list);
success &= value_success;
- // If above call consumed an argument, it was a standalone value
- if (arg_from_argv && (arg_list_act == ArgvListAction::kKeepParsedArgs) &&
- (curr_index != curr_list.FrontIndex())) {
- output_args.push_back(argv[curr_list.FrontIndex()]);
- }
-
- // 100. Set the located flag to a new new value, unless it is retired.
- // Setting retired flag fails, but we ignoring it here while also reporting
- // access to retired flag.
+    // Set the located flag to a new value, unless it is retired. Setting a
+    // retired flag fails, but we ignore it here while also reporting access
+    // to the retired flag.
TString error;
if (!flags_internal::PrivateHandleAccessor::ParseFrom(
- *flag, value, SET_FLAGS_VALUE, kCommandLine, error)) {
+ *flag, value, flags_internal::SET_FLAGS_VALUE,
+ flags_internal::kCommandLine, error)) {
if (flag->IsRetired()) continue;
flags_internal::ReportUsageError(error, true);
@@ -811,78 +869,73 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
}
}
- for (const auto& flag_name : undefined_flag_names) {
- if (CanIgnoreUndefinedFlag(flag_name.second)) continue;
- // Verify if flag_name has the "no" already removed
- std::vector<TString> flags;
- if (flag_name.first) flags = GetMisspellingHints(flag_name.second);
- if (flags.empty()) {
- flags_internal::ReportUsageError(
- y_absl::StrCat("Unknown command line flag '", flag_name.second, "'"),
- true);
- } else {
- flags_internal::ReportUsageError(
- y_absl::StrCat("Unknown command line flag '", flag_name.second,
- "'. Did you mean: ", y_absl::StrJoin(flags, ", "), " ?"),
- true);
+ flags_internal::ResetGeneratorFlags(flagfile_value);
+
+ // All the remaining arguments are positional.
+ if (!input_args.empty()) {
+ for (size_t arg_index = input_args.back().FrontIndex();
+ arg_index < static_cast<size_t>(argc); ++arg_index) {
+ positional_args.push_back(argv[arg_index]);
}
+ }
- success = false;
+ // Trim and sort the vector.
+ specified_flags->shrink_to_fit();
+ std::sort(specified_flags->begin(), specified_flags->end(),
+ flags_internal::SpecifiedFlagsCompare{});
+
+ // Filter out unrecognized flags, which are ok to ignore.
+ std::vector<UnrecognizedFlag> filtered;
+ filtered.reserve(unrecognized_flags.size());
+ for (const auto& unrecognized : unrecognized_flags) {
+ if (flags_internal::CanIgnoreUndefinedFlag(unrecognized.flag_name))
+ continue;
+ filtered.push_back(unrecognized);
}
-#if Y_ABSL_FLAGS_STRIP_NAMES
+ std::swap(unrecognized_flags, filtered);
+
if (!success) {
+#if Y_ABSL_FLAGS_STRIP_NAMES
flags_internal::ReportUsageError(
"NOTE: command line flags are disabled in this build", true);
- }
+#else
+ flags_internal::HandleUsageFlags(std::cerr, ProgramUsageMessage());
#endif
-
- if (!success) {
- flags_internal::HandleUsageFlags(std::cout,
- ProgramUsageMessage());
- std::exit(1);
+    return HelpMode::kFull;  // We just need to make sure we exit with
+                              // code 1.
}
- if (usage_flag_act == UsageFlagsAction::kHandleUsage) {
- int exit_code = flags_internal::HandleUsageFlags(
- std::cout, ProgramUsageMessage());
+ return usage_flag_action == UsageFlagsAction::kHandleUsage
+ ? flags_internal::HandleUsageFlags(std::cout,
+ ProgramUsageMessage())
+ : HelpMode::kNone;
+}
- if (exit_code != -1) {
- std::exit(exit_code);
- }
- }
+} // namespace flags_internal
- ResetGeneratorFlags(flagfile_value);
+void ParseAbseilFlagsOnly(int argc, char* argv[],
+ std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags) {
+ auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
+ argc, argv, positional_args, unrecognized_flags,
+ flags_internal::UsageFlagsAction::kHandleUsage);
- // Reinstate positional args which were intermixed with flags in the arguments
- // list.
- for (auto arg : positional_args) {
- output_args.push_back(arg);
- }
+ flags_internal::MaybeExit(help_mode);
+}
- // All the remaining arguments are positional.
- if (!input_args.empty()) {
- for (size_t arg_index = input_args.back().FrontIndex();
- arg_index < static_cast<size_t>(argc); ++arg_index) {
- output_args.push_back(argv[arg_index]);
- }
- }
+// --------------------------------------------------------------------
- // Trim and sort the vector.
- specified_flags->shrink_to_fit();
- std::sort(specified_flags->begin(), specified_flags->end(),
- SpecifiedFlagsCompare{});
- return output_args;
+void ReportUnrecognizedFlags(
+ const std::vector<UnrecognizedFlag>& unrecognized_flags) {
+ flags_internal::ReportUnrecognizedFlags(unrecognized_flags, true);
}
-} // namespace flags_internal
-
// --------------------------------------------------------------------
std::vector<char*> ParseCommandLine(int argc, char* argv[]) {
return flags_internal::ParseCommandLineImpl(
- argc, argv, flags_internal::ArgvListAction::kRemoveParsedArgs,
- flags_internal::UsageFlagsAction::kHandleUsage,
+ argc, argv, flags_internal::UsageFlagsAction::kHandleUsage,
flags_internal::OnUndefinedFlag::kAbortIfUndefined);
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.h b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.h
index c8ee5b21d3..b84f55d770 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/parse.h
@@ -23,6 +23,7 @@
#ifndef Y_ABSL_FLAGS_PARSE_H_
#define Y_ABSL_FLAGS_PARSE_H_
+#include <util/generic/string.h>
#include <vector>
#include "y_absl/base/config.h"
@@ -31,27 +32,96 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
+// This type represents information about an unrecognized flag in the command
+// line.
+struct UnrecognizedFlag {
+ enum Source { kFromArgv, kFromFlagfile };
+
+ explicit UnrecognizedFlag(Source s, y_absl::string_view f)
+ : source(s), flag_name(f) {}
+ // This field indicates where we found this flag: on the original command line
+ // or read from a flag file.
+ Source source;
+ // Name of the flag we did not recognize in --flag_name=value or --flag_name.
+ TString flag_name;
+};
+
+inline bool operator==(const UnrecognizedFlag& lhs,
+ const UnrecognizedFlag& rhs) {
+ return lhs.source == rhs.source && lhs.flag_name == rhs.flag_name;
+}
+
+namespace flags_internal {
+
+HelpMode ParseAbseilFlagsOnlyImpl(
+ int argc, char* argv[], std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags,
+ UsageFlagsAction usage_flag_action);
+
+} // namespace flags_internal
+
+// ParseAbseilFlagsOnly()
+//
+// Parses a list of command-line arguments, passed in the `argc` and `argv[]`
+// parameters, into a set of Abseil Flag values, returning any unparsed
+// arguments in `positional_args` and `unrecognized_flags` output parameters.
+//
+// This function classifies all the arguments (including the content of
+// flagfiles, if any) into one of the following groups:
+//
+// * arguments specified as "--flag=value" or "--flag value" that match
+// registered or built-in Abseil Flags. These are "Abseil Flag arguments."
+// * arguments specified as "--flag" that are unrecognized as Abseil Flags
+// * arguments that are not specified as "--flag" are positional arguments
+// * arguments that follow the flag-terminating delimiter (`--`) are also
+// treated as positional arguments regardless of their syntax.
+//
+// All of the deduced Abseil Flag arguments are then parsed into their
+// corresponding flag values. If any syntax errors are found in these arguments,
+// the binary exits with code 1.
+//
+// This function also handles Abseil Flags built-in usage flags (e.g. --help)
+// if any were present on the command line.
+//
+// All the remaining positional arguments, including the original program name
+// (argv[0]), are returned in the `positional_args` output parameter.
+//
+// All unrecognized flags that are not otherwise ignored are returned in the
+// `unrecognized_flags` output parameter. Note that the special `undefok`
+// flag allows you to specify flags which can be safely ignored; `undefok`
+// specifies these flags as a comma-separated list. Any unrecognized flags
+// that appear within `undefok` will therefore be ignored and not included in
+// the `unrecognized_flags` output parameter.
+//
+void ParseAbseilFlagsOnly(int argc, char* argv[],
+ std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags);
+
+// ReportUnrecognizedFlags()
+//
+// Reports an error to `stderr` for all non-ignored unrecognized flags in
+// the provided `unrecognized_flags` list.
+void ReportUnrecognizedFlags(
+ const std::vector<UnrecognizedFlag>& unrecognized_flags);
+
// ParseCommandLine()
//
-// Parses the set of command-line arguments passed in the `argc` (argument
-// count) and `argv[]` (argument vector) parameters from `main()`, assigning
-// values to any defined Abseil flags. (Any arguments passed after the
-// flag-terminating delimiter (`--`) are treated as positional arguments and
-// ignored.)
-//
-// Any command-line flags (and arguments to those flags) are parsed into Abseil
-// Flag values, if those flags are defined. Any undefined flags will either
-// return an error, or be ignored if that flag is designated using `undefok` to
-// indicate "undefined is OK."
-//
-// Any command-line positional arguments not part of any command-line flag (or
-// arguments to a flag) are returned in a vector, with the program invocation
-// name at position 0 of that vector. (Note that this includes positional
-// arguments after the flag-terminating delimiter `--`.)
-//
-// After all flags and flag arguments are parsed, this function looks for any
-// built-in usage flags (e.g. `--help`), and if any were specified, it reports
-// help messages and then exits the program.
+// First parses Abseil Flags only from the command line according to the
+// description in `ParseAbseilFlagsOnly`. In addition, this function handles
+// unrecognized and usage flags.
+//
+// If any unrecognized flags are located, they are reported using
+// `ReportUnrecognizedFlags`.
+//
+// If any errors are detected during command line parsing, this routine reports a
+// usage message and aborts the program.
+//
+// If any built-in usage flags were specified on the command line (e.g.
+// `--help`), this function reports help messages and then gracefully exits the
+// program.
+//
+// This function returns all the remaining positional arguments collected by
+// `ParseAbseilFlagsOnly`.
std::vector<char*> ParseCommandLine(int argc, char* argv[]);
Y_ABSL_NAMESPACE_END
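The header above documents the new public flow end to end. The following is a minimal usage sketch (not part of this change) of how a caller might combine `ParseAbseilFlagsOnly()` and `ReportUnrecognizedFlags()` to keep running past unknown flags rather than aborting as `ParseCommandLine()` does:

```cpp
#include <vector>

#include "y_absl/flags/parse.h"

int main(int argc, char* argv[]) {
  std::vector<char*> positional_args;
  std::vector<y_absl::UnrecognizedFlag> unrecognized_flags;

  // Parse Abseil Flags only; built-in usage flags such as --help are
  // handled (and exit) here.
  y_absl::ParseAbseilFlagsOnly(argc, argv, positional_args, unrecognized_flags);

  // Unlike ParseCommandLine(), the caller decides what to do with unknown
  // flags: report them to stderr but keep running.
  y_absl::ReportUnrecognizedFlags(unrecognized_flags);

  // positional_args[0] is the program name; the rest are positional.
  return 0;
}
```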
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage.cc
index 495bd01fa3..791d9501b8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/flags/usage.cc
@@ -21,6 +21,7 @@
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
#include "y_absl/base/const_init.h"
+#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/thread_annotations.h"
#include "y_absl/flags/internal/usage.h"
#include "y_absl/strings/string_view.h"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/any_invocable.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/any_invocable.h
index 78c3f68072..f04000578a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/any_invocable.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/any_invocable.h
@@ -266,9 +266,17 @@ class AnyInvocable : private internal_any_invocable::Impl<Sig> {
// Exchanges the targets of `*this` and `other`.
void swap(AnyInvocable& other) noexcept { std::swap(*this, other); }
- // abl::AnyInvocable::operator bool()
+ // y_absl::AnyInvocable::operator bool()
//
// Returns `true` if `*this` is not empty.
+ //
+ // WARNING: An `AnyInvocable` that wraps an empty `std::function` is not
+ // itself empty. This behavior is consistent with the standard equivalent
+ // `std::move_only_function`.
+ //
+ // In other words:
+ // std::function<void()> f; // empty
+ // y_absl::AnyInvocable<void()> a = std::move(f); // not empty
explicit operator bool() const noexcept { return this->HasValue(); }
// Invokes the target object of `*this`. `*this` must not be empty.
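A runnable version of the warning above, as a sketch (assertions assumed enabled):

```cpp
#include <cassert>
#include <functional>
#include <utility>

#include "y_absl/functional/any_invocable.h"

int main() {
  std::function<void()> f;                        // empty
  y_absl::AnyInvocable<void()> a = std::move(f);  // NOT empty: it wraps f
  assert(static_cast<bool>(a));                   // passes: a has a target
  // a();  // would invoke the empty std::function: throws bad_function_call
  return 0;
}
```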
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
index 087872d0f9..27597c486b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/bind_front.h
@@ -46,7 +46,7 @@ Y_ABSL_NAMESPACE_BEGIN
//
// Like `std::bind()`, `y_absl::bind_front()` is implicitly convertible to
// `std::function`. In particular, it may be used as a simpler replacement for
-// `std::bind()` in most cases, as it does not require placeholders to be
+// `std::bind()` in most cases, as it does not require placeholders to be
// specified. More importantly, it provides more reliable correctness guarantees
// than `std::bind()`; while `std::bind()` will silently ignore passing more
// parameters than expected, for example, `y_absl::bind_front()` will report such
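The correctness point in this comment is easy to demonstrate; a small sketch (the `Minus` helper is invented for illustration):

```cpp
#include "y_absl/functional/bind_front.h"

int Minus(int a, int b) { return a - b; }

int main() {
  auto sub_from_ten = y_absl::bind_front(Minus, 10);
  int r = sub_from_ten(3);  // 7: equivalent to Minus(10, 3)
  // sub_from_ten(1, 2);    // compile error, where std::bind would silently
  //                        // ignore the extra argument
  return r == 7 ? 0 : 1;
}
```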
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
index 78814e8960..33a0e20a26 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/function_ref.h
@@ -66,11 +66,11 @@ class FunctionRef;
// FunctionRef
//
-// An `y_absl::FunctionRef` is a lightweight wrapper to any invokable object with
+// An `y_absl::FunctionRef` is a lightweight wrapper to any invocable object with
// a compatible signature. Generally, an `y_absl::FunctionRef` should only be used
// as an argument type and should be preferred as an argument over a const
// reference to a `std::function`. `y_absl::FunctionRef` itself does not allocate,
-// although the wrapped invokable may.
+// although the wrapped invocable may.
//
// Example:
//
@@ -98,7 +98,7 @@ class FunctionRef<R(Args...)> {
std::is_convertible<FR, R>::value>::type;
public:
- // Constructs a FunctionRef from any invokable type.
+ // Constructs a FunctionRef from any invocable type.
template <typename F, typename = EnableIfCompatible<const F&>>
// NOLINTNEXTLINE(runtime/explicit)
FunctionRef(const F& f Y_ABSL_ATTRIBUTE_LIFETIME_BOUND)
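For context, a minimal sketch of the argument-passing pattern the comment above recommends (`CallTwice` is a hypothetical helper):

```cpp
#include "y_absl/functional/function_ref.h"

// Borrow the callback only for the duration of the call; no allocation.
int CallTwice(y_absl::FunctionRef<int(int)> f, int x) { return f(f(x)); }

int main() {
  int result = CallTwice([](int v) { return v + 1; }, 0);
  return result == 2 ? 0 : 1;
}
```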
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/any_invocable.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/any_invocable.h
index 5dd37858f0..f3a79186d8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/any_invocable.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/any_invocable.h
@@ -56,6 +56,7 @@
#include <cassert>
#include <cstddef>
#include <cstring>
+#include <exception>
#include <functional>
#include <initializer_list>
#include <memory>
@@ -134,8 +135,16 @@ void InvokeR(F&& f, P&&... args) {
template <class ReturnType, class F, class... P,
y_absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0>
ReturnType InvokeR(F&& f, P&&... args) {
+ // GCC 12 has a false-positive -Wmaybe-uninitialized warning here.
+#if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return y_absl::base_internal::invoke(std::forward<F>(f),
std::forward<P>(args)...);
+#if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
}
//
@@ -196,7 +205,7 @@ union TypeErasedState {
template <class T>
T& ObjectInLocalStorage(TypeErasedState* const state) {
// We launder here because the storage may be reused with the same type.
-#if Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
return *std::launder(reinterpret_cast<T*>(&state->storage));
#elif Y_ABSL_HAVE_BUILTIN(__builtin_launder)
return *__builtin_launder(reinterpret_cast<T*>(&state->storage));
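The guard above switches from a language-version check to the `__cpp_lib_launder` feature-test macro, which is what actually proves `std::launder` exists. A standalone sketch of the storage-reuse pattern being guarded (`Widget` is invented for illustration):

```cpp
#include <new>

struct Widget { int id; };

int main() {
  alignas(Widget) unsigned char storage[sizeof(Widget)];
  Widget* w = ::new (storage) Widget{42};  // placement-new into raw storage
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
  // launder() yields a pointer that may legally access the new object.
  int id = std::launder(reinterpret_cast<Widget*>(storage))->id;
#else
  int id = w->id;
#endif
  w->~Widget();
  return id == 42 ? 0 : 1;
}
```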
@@ -431,11 +440,11 @@ class CoreImpl {
CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {}
- enum class TargetType : int {
- kPointer = 0,
- kCompatibleAnyInvocable = 1,
- kIncompatibleAnyInvocable = 2,
- kOther = 3,
+ enum class TargetType {
+ kPointer,
+ kCompatibleAnyInvocable,
+ kIncompatibleAnyInvocable,
+ kOther,
};
// Note: QualDecayedTRef here includes the cv-ref qualifiers associated with
@@ -457,8 +466,7 @@ class CoreImpl {
// NOTE: We only use integers instead of enums as template parameters in
// order to work around a bug on C++14 under MSVC 2017.
// See b/236131881.
- Initialize<static_cast<int>(kTargetType), QualDecayedTRef>(
- std::forward<F>(f));
+ Initialize<kTargetType, QualDecayedTRef>(std::forward<F>(f));
}
// Note: QualTRef here includes the cv-ref qualifiers associated with the
@@ -487,7 +495,7 @@ class CoreImpl {
// object.
Clear();
- // Perform the actual move/destory operation on the target function.
+ // Perform the actual move/destroy operation on the target function.
other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
manager_ = other.manager_;
invoker_ = other.invoker_;
@@ -509,8 +517,8 @@ class CoreImpl {
invoker_ = nullptr;
}
- template <int target_type, class QualDecayedTRef, class F,
- y_absl::enable_if_t<target_type == 0, int> = 0>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ y_absl::enable_if_t<target_type == TargetType::kPointer, int> = 0>
void Initialize(F&& f) {
// This condition handles types that decay into pointers, which includes
// function references. Since function references cannot be null, GCC warns
@@ -534,8 +542,9 @@ class CoreImpl {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
}
- template <int target_type, class QualDecayedTRef, class F,
- y_absl::enable_if_t<target_type == 1, int> = 0>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ y_absl::enable_if_t<
+ target_type == TargetType::kCompatibleAnyInvocable, int> = 0>
void Initialize(F&& f) {
// In this case we can "steal the guts" of the other AnyInvocable.
f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
@@ -546,8 +555,9 @@ class CoreImpl {
f.invoker_ = nullptr;
}
- template <int target_type, class QualDecayedTRef, class F,
- y_absl::enable_if_t<target_type == 2, int> = 0>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ y_absl::enable_if_t<
+ target_type == TargetType::kIncompatibleAnyInvocable, int> = 0>
void Initialize(F&& f) {
if (f.HasValue()) {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
@@ -557,8 +567,8 @@ class CoreImpl {
}
}
- template <int target_type, class QualDecayedTRef, class F,
- typename = y_absl::enable_if_t<target_type == 3>>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ typename = y_absl::enable_if_t<target_type == TargetType::kOther>>
void Initialize(F&& f) {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
}
@@ -810,19 +820,22 @@ using CanAssignReferenceWrapper = TrueAlias<
: Core(y_absl::in_place_type<y_absl::decay_t<T> inv_quals>, \
std::forward<Args>(args)...) {} \
\
+ /*Raises a fatal error when the AnyInvocable is invoked after a move*/ \
+ static ReturnType InvokedAfterMove( \
+ TypeErasedState*, \
+ ForwardedParameterType<P>...) noexcept(noex) { \
+ Y_ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \
+ std::terminate(); \
+ } \
+ \
InvokerType<noex, ReturnType, P...>* ExtractInvoker() cv { \
using QualifiedTestType = int cv ref; \
auto* invoker = this->invoker_; \
if (!std::is_const<QualifiedTestType>::value && \
std::is_rvalue_reference<QualifiedTestType>::value) { \
- Y_ABSL_HARDENING_ASSERT([this]() { \
+ Y_ABSL_ASSERT([this]() { \
/* We checked that this isn't const above, so const_cast is safe */ \
- const_cast<Impl*>(this)->invoker_ = \
- [](TypeErasedState*, \
- ForwardedParameterType<P>...) noexcept(noex) -> ReturnType { \
- Y_ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \
- std::terminate(); \
- }; \
+ const_cast<Impl*>(this)->invoker_ = InvokedAfterMove; \
return this->HasValue(); \
}()); \
} \
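The lambda formerly inlined here is now the named `InvokedAfterMove` helper, giving one copy per instantiation instead of one per macro expansion. A hedged sketch of the condition it guards (the trap only fires in assertion-enabled builds):

```cpp
#include <utility>

#include "y_absl/functional/any_invocable.h"

int main() {
  y_absl::AnyInvocable<int() &&> task = [] { return 1; };
  int v = std::move(task)();  // fine: first rvalue invocation
  // std::move(task)();       // use-after-move: InvokedAfterMove terminates
  //                          // in assertion-enabled builds; UB otherwise
  return v;
}
```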
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h
index a6be684ffa..2eb6719ec6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/internal/function_ref.h
@@ -20,6 +20,7 @@
#include <type_traits>
#include "y_absl/base/internal/invoke.h"
+#include "y_absl/functional/any_invocable.h"
#include "y_absl/meta/type_traits.h"
namespace y_absl {
@@ -40,18 +41,21 @@ union VoidPtr {
// Chooses the best type for passing T as an argument.
// Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are
// passed by value.
+template <typename T,
+ bool IsLValueReference = std::is_lvalue_reference<T>::value>
+struct PassByValue : std::false_type {};
+
template <typename T>
-constexpr bool PassByValue() {
- return !std::is_lvalue_reference<T>::value &&
- y_absl::is_trivially_copy_constructible<T>::value &&
- y_absl::is_trivially_copy_assignable<
- typename std::remove_cv<T>::type>::value &&
- std::is_trivially_destructible<T>::value &&
- sizeof(T) <= 2 * sizeof(void*);
-}
+struct PassByValue<T, /*IsLValueReference=*/false>
+ : std::integral_constant<bool,
+ y_absl::is_trivially_copy_constructible<T>::value &&
+ y_absl::is_trivially_copy_assignable<
+ typename std::remove_cv<T>::type>::value &&
+ std::is_trivially_destructible<T>::value &&
+ sizeof(T) <= 2 * sizeof(void*)> {};
template <typename T>
-struct ForwardT : std::conditional<PassByValue<T>(), T, T&&> {};
+struct ForwardT : std::conditional<PassByValue<T>::value, T, T&&> {};
// An Invoker takes a pointer to the type-erased invokable object, followed by
// the arguments that the invokable object expects.
@@ -87,6 +91,12 @@ void AssertNonNull(const std::function<Sig>& f) {
(void)f;
}
+template <typename Sig>
+void AssertNonNull(const AnyInvocable<Sig>& f) {
+ assert(f != nullptr);
+ (void)f;
+}
+
template <typename F>
void AssertNonNull(const F&) {}
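The new `AnyInvocable` overload mirrors the `std::function` one above, so a null target is caught when a `FunctionRef` is built from an `AnyInvocable`. A sketch; note the const-qualified signature, which is what makes the `AnyInvocable` invocable through the const reference `FunctionRef` takes:

```cpp
#include "y_absl/functional/any_invocable.h"
#include "y_absl/functional/function_ref.h"

int Apply(y_absl::FunctionRef<int(int)> f) { return f(21); }

int main() {
  // The const-qualified signature lets FunctionRef invoke through const&.
  y_absl::AnyInvocable<int(int) const> doubler = [](int v) { return v * 2; };
  return Apply(doubler) == 42 ? 0 : 1;  // a null doubler would assert instead
}
```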
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make
index 26a3c44152..8981153caa 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/functional/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20230125.3)
+VERSION(20230802.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230125.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230802.0.tar.gz)
NO_RUNTIME()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
index 00a359d224..2dcb3c0d02 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/hash.h
@@ -42,7 +42,7 @@
//
// `y_absl::Hash` may also produce different values from different dynamically
// loaded libraries. For this reason, `y_absl::Hash` values must never cross
-// boundries in dynamically loaded libraries (including when used in types like
+// boundaries in dynamically loaded libraries (including when used in types like
// hash containers.)
//
// `y_absl::Hash` is intended to strongly mix input bits with a target of passing
@@ -110,9 +110,12 @@ Y_ABSL_NAMESPACE_BEGIN
// * std::unique_ptr and std::shared_ptr
// * All string-like types including:
// * y_absl::Cord
-// * TString
-// * std::string_view (as well as any instance of std::basic_string that
-// uses char and std::char_traits)
+// * TString (as well as any instance of std::basic_string that
+// uses one of {char, wchar_t, char16_t, char32_t} and its associated
+// std::char_traits)
+// * std::string_view (as well as any instance of std::basic_string_view
+// that uses one of {char, wchar_t, char16_t, char32_t} and its associated
+// std::char_traits)
// * All the standard sequence containers (provided the elements are hashable)
// * All the standard associative containers (provided the elements are
// hashable)
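A small sketch of the broadened string support listed above, assuming `std::wstring_view` is available (i.e. `Y_ABSL_HAVE_STD_STRING_VIEW` is set):

```cpp
#include <string>
#include <string_view>

#include "y_absl/hash/hash.h"

int main() {
  std::wstring ws = L"abc";
  // Per the guarantee above, equal character sequences hash equally across
  // the owning string type and its view.
  size_t h1 = y_absl::Hash<std::wstring>{}(ws);
  size_t h2 = y_absl::Hash<std::wstring_view>{}(std::wstring_view(ws));
  return h1 == h2 ? 0 : 1;
}
```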
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
index 5b559ffd0d..8250eba6c2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/hash.h
@@ -56,6 +56,10 @@
#include "y_absl/types/variant.h"
#include "y_absl/utility/utility.h"
+#ifdef Y_ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
@@ -424,7 +428,7 @@ H AbslHashValue(H hash_state, std::nullptr_t) {
// AbslHashValue() for hashing pointers-to-member
template <typename H, typename T, typename C>
-H AbslHashValue(H hash_state, T C::* ptr) {
+H AbslHashValue(H hash_state, T C::*ptr) {
auto salient_ptm_size = [](std::size_t n) -> std::size_t {
#if defined(_MSC_VER)
// Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
@@ -442,8 +446,8 @@ H AbslHashValue(H hash_state, T C::* ptr) {
return n == 24 ? 20 : n == 16 ? 12 : n;
}
#else
- // On other platforms, we assume that pointers-to-members do not have
- // padding.
+ // On other platforms, we assume that pointers-to-members do not have
+ // padding.
#ifdef __cpp_lib_has_unique_object_representations
static_assert(std::has_unique_object_representations<T C::*>::value);
#endif // __cpp_lib_has_unique_object_representations
@@ -516,14 +520,15 @@ H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
// the same character sequence. These types are:
//
// - `y_absl::Cord`
-// - `TString` (and std::basic_string<char, std::char_traits<char>, A> for
-// any allocator A)
-// - `y_absl::string_view` and `std::string_view`
+// - `TString` (and std::basic_string<T, std::char_traits<T>, A> for
+// any allocator A and any T in {char, wchar_t, char16_t, char32_t})
+// - `y_absl::string_view`, `std::string_view`, `std::wstring_view`,
+// `std::u16string_view`, and `std::u32string_view`.
//
-// For simplicity, we currently support only `char` strings. This support may
-// be broadened, if necessary, but with some caution - this overload would
-// misbehave in cases where the traits' `eq()` member isn't equivalent to `==`
-// on the underlying character type.
+// For simplicity, we currently support only strings built on `char`, `wchar_t`,
+// `char16_t`, or `char32_t`. This support may be broadened, if necessary, but
+// with some caution - this overload would misbehave in cases where the traits'
+// `eq()` member isn't equivalent to `==` on the underlying character type.
template <typename H>
H AbslHashValue(H hash_state, y_absl::string_view str) {
return H::combine(
@@ -544,6 +549,21 @@ H AbslHashValue(
str.size());
}
+#ifdef Y_ABSL_HAVE_STD_STRING_VIEW
+
+// Support std::wstring_view, std::u16string_view and std::u32string_view.
+template <typename Char, typename H,
+ typename = y_absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
+ std::is_same<Char, char16_t>::value ||
+ std::is_same<Char, char32_t>::value>>
+H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
+ return H::combine(
+ H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
+ str.size());
+}
+
+#endif // Y_ABSL_HAVE_STD_STRING_VIEW
+
// -----------------------------------------------------------------------------
// AbslHashValue for Sequence Containers
// -----------------------------------------------------------------------------
@@ -935,8 +955,8 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
#endif // Y_ABSL_HAVE_INTRINSIC_INT128
static constexpr uint64_t kMul =
- sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
- : uint64_t{0x9ddfea08eb382d69};
+ sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
+ : uint64_t{0x9ddfea08eb382d69};
template <typename T>
using IntegralFastPath =
@@ -969,7 +989,8 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// The result should be the same as running the whole algorithm, but faster.
template <typename T, y_absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
static size_t hash(T value) {
- return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
+ return static_cast<size_t>(
+ Mix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));
}
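A standalone illustration (not library code) of why the cast now goes through `std::make_unsigned_t<T>`: direct widening of a negative value sign-extends across all 64 bits, changing which bits feed the mix.

```cpp
#include <cstdint>
#include <type_traits>

int main() {
  int v = -1;
  // Direct widening sign-extends to 64 bits...
  uint64_t sign_extended = static_cast<uint64_t>(v);  // 0xFFFFFFFFFFFFFFFF
  // ...while widening through the type's own unsigned counterpart keeps the
  // value within the original width.
  uint64_t via_unsigned =
      static_cast<std::make_unsigned_t<int>>(v);      // 0x00000000FFFFFFFF
  return sign_extended != via_unsigned ? 0 : 1;
}
```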
// Overload of MixingHashState::hash()
@@ -1073,6 +1094,7 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
+ // The trick used by this implementation is to avoid branches if possible.
unsigned char mem0 = p[0];
unsigned char mem1 = p[len / 2];
unsigned char mem2 = p[len - 1];
@@ -1082,7 +1104,7 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
unsigned char significant0 = mem0;
#else
unsigned char significant2 = mem0;
- unsigned char significant1 = mem1;
+ unsigned char significant1 = len == 2 ? mem0 : mem1;
unsigned char significant0 = mem2;
#endif
return static_cast<uint32_t>(significant0 | //
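For reference, the index trick this hunk adjusts: the three loads `p[0]`, `p[len / 2]`, `p[len - 1]` stay in bounds and together touch every byte for `len` in 1..3, with no length-dependent branch. For `len == 2` the middle and last loads coincide at `p[1]`, which is why the big-endian arm now takes `mem0` for the middle slot. A tiny demonstration of the indices:

```cpp
#include <cstdio>

int main() {
  for (int len = 1; len <= 3; ++len) {
    // len=1 reads p[0] three times; len=2 reads p[0], p[1], p[1];
    // len=3 reads p[0], p[1], p[2].
    std::printf("len=%d reads p[0], p[%d], p[%d]\n", len, len / 2, len - 1);
  }
  return 0;
}
```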
@@ -1135,7 +1157,8 @@ class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// probably per-build and not per-process.
Y_ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
#if (!defined(__clang__) || __clang_major__ > 11) && \
- !defined(__apple_build_version__)
+ (!defined(__apple_build_version__) || \
+ __apple_build_version__ >= 19558921) // Xcode 12
return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
#else
// Workaround the absence of
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc
index 272e007eb9..4c491215d9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/hash/internal/low_level_hash.cc
@@ -15,6 +15,7 @@
#include "y_absl/hash/internal/low_level_hash.h"
#include "y_absl/base/internal/unaligned_access.h"
+#include "y_absl/base/prefetch.h"
#include "y_absl/numeric/int128.h"
namespace y_absl {
@@ -29,6 +30,8 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]) {
+ // Prefetch the cacheline that data resides in.
+ PrefetchToLocalCache(data);
const uint8_t* ptr = static_cast<const uint8_t*>(data);
uint64_t starting_length = static_cast<uint64_t>(len);
uint64_t current_state = seed ^ salt[0];
@@ -40,6 +43,9 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
uint64_t duplicated_state = current_state;
do {
+ // Always prefetch the next cacheline.
+ PrefetchToLocalCache(ptr + Y_ABSL_CACHELINE_SIZE);
+
uint64_t a = y_absl::base_internal::UnalignedLoad64(ptr);
uint64_t b = y_absl::base_internal::UnalignedLoad64(ptr + 8);
uint64_t c = y_absl::base_internal::UnalignedLoad64(ptr + 16);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make
index 30722e54e4..0c76143089 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/memory/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20230125.3)
+VERSION(20230802.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230125.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230802.0.tar.gz)
PEERDIR(
contrib/restricted/abseil-cpp-tstring/y_absl/meta
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h
index 0ffbcbee27..0bec59b056 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/type_traits.h
@@ -39,14 +39,9 @@
#include <functional>
#include <type_traits>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
-// MSVC constructibility traits do not detect destructor properties and so our
-// implementations should not use them as a source-of-truth.
-#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
-#define Y_ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
-#endif
-
// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
// feature.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
@@ -58,57 +53,8 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
-// Defined and documented later on in this file.
-template <typename T>
-struct is_trivially_destructible;
-
-// Defined and documented later on in this file.
-template <typename T>
-struct is_trivially_move_assignable;
-
namespace type_traits_internal {
-// Silence MSVC warnings about the destructor being defined as deleted.
-#if defined(_MSC_VER) && !defined(__GNUC__)
-#pragma warning(push)
-#pragma warning(disable : 4624)
-#endif // defined(_MSC_VER) && !defined(__GNUC__)
-
-template <class T>
-union SingleMemberUnion {
- T t;
-};
-
-// Restore the state of the destructor warning that was silenced above.
-#if defined(_MSC_VER) && !defined(__GNUC__)
-#pragma warning(pop)
-#endif // defined(_MSC_VER) && !defined(__GNUC__)
-
-template <class T>
-struct IsTriviallyMoveConstructibleObject
- : std::integral_constant<
- bool, std::is_move_constructible<
- type_traits_internal::SingleMemberUnion<T>>::value &&
- y_absl::is_trivially_destructible<T>::value> {};
-
-template <class T>
-struct IsTriviallyCopyConstructibleObject
- : std::integral_constant<
- bool, std::is_copy_constructible<
- type_traits_internal::SingleMemberUnion<T>>::value &&
- y_absl::is_trivially_destructible<T>::value> {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference : std::false_type {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference<T&>
- : y_absl::is_trivially_move_assignable<T>::type {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference<T&&>
- : y_absl::is_trivially_move_assignable<T>::type {};
-
template <typename... Ts>
struct VoidTImpl {
using type = void;
@@ -157,39 +103,8 @@ template <class To, template <class...> class Op, class... Args>
struct is_detected_convertible
: is_detected_convertible_impl<void, To, Op, Args...>::type {};
-template <typename T>
-using IsCopyAssignableImpl =
- decltype(std::declval<T&>() = std::declval<const T&>());
-
-template <typename T>
-using IsMoveAssignableImpl = decltype(std::declval<T&>() = std::declval<T&&>());
-
} // namespace type_traits_internal
-// MSVC 19.20 has a regression that causes our workarounds to fail, but their
-// std forms now appear to be compliant.
-#if defined(_MSC_VER) && !defined(__clang__) && (_MSC_VER >= 1920)
-
-template <typename T>
-using is_copy_assignable = std::is_copy_assignable<T>;
-
-template <typename T>
-using is_move_assignable = std::is_move_assignable<T>;
-
-#else
-
-template <typename T>
-struct is_copy_assignable : type_traits_internal::is_detected<
- type_traits_internal::IsCopyAssignableImpl, T> {
-};
-
-template <typename T>
-struct is_move_assignable : type_traits_internal::is_detected<
- type_traits_internal::IsMoveAssignableImpl, T> {
-};
-
-#endif
-
// void_t()
//
// Ignores the type of any its arguments and returns `void`. In general, this
@@ -270,246 +185,29 @@ struct is_function
bool, !(std::is_reference<T>::value ||
std::is_const<typename std::add_const<T>::type>::value)> {};
+// is_copy_assignable()
+// is_move_assignable()
// is_trivially_destructible()
-//
-// Determines whether the passed type `T` is trivially destructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_destructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: the extensions (__has_trivial_xxx) are implemented in gcc (version >=
-// 4.3) and clang. Since we are supporting libstdc++ > 4.7, they should always
-// be present. These extensions are documented at
-// https://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html#Type-Traits.
-template <typename T>
-struct is_trivially_destructible
-#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
- : std::is_trivially_destructible<T> {
-#else
- : std::integral_constant<bool, __has_trivial_destructor(T) &&
- std::is_destructible<T>::value> {
-#endif
-#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
- private:
- static constexpr bool compliant = std::is_trivially_destructible<T>::value ==
- is_trivially_destructible::value;
- static_assert(compliant || std::is_trivially_destructible<T>::value,
- "Not compliant with std::is_trivially_destructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_destructible<T>::value,
- "Not compliant with std::is_trivially_destructible; "
- "Standard: true, Implementation: false");
-#endif // Y_ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
-};
-
// is_trivially_default_constructible()
-//
-// Determines whether the passed type `T` is trivially default constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_default_constructible()` metafunction for platforms that
-// have incomplete C++11 support (such as libstdc++ 4.x). On any platforms that
-// do fully support C++11, we check whether this yields the same result as the
-// std implementation.
-//
-// NOTE: according to the C++ standard, Section: 20.15.4.3 [meta.unary.prop]
-// "The predicate condition for a template specialization is_constructible<T,
-// Args...> shall be satisfied if and only if the following variable
-// definition would be well-formed for some invented variable t:
-//
-// T t(declval<Args>()...);
-//
-// is_trivially_constructible<T, Args...> additionally requires that the
-// variable definition does not call any operation that is not trivial.
-// For the purposes of this check, the call to std::declval is considered
-// trivial."
-//
-// Notes from https://en.cppreference.com/w/cpp/types/is_constructible:
-// In many implementations, is_nothrow_constructible also checks if the
-// destructor throws because it is effectively noexcept(T(arg)). Same
-// applies to is_trivially_constructible, which, in these implementations, also
-// requires that the destructor is trivial.
-// GCC bug 51452: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452
-// LWG issue 2116: http://cplusplus.github.io/LWG/lwg-active.html#2116.
-//
-// "T obj();" need to be well-formed and not call any nontrivial operation.
-// Nontrivially destructible types will cause the expression to be nontrivial.
-template <typename T>
-struct is_trivially_default_constructible
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
- : std::is_trivially_default_constructible<T> {
-#else
- : std::integral_constant<bool, __has_trivial_constructor(T) &&
- std::is_default_constructible<T>::value &&
- is_trivially_destructible<T>::value> {
-#endif
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
- !defined( \
- Y_ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
- static constexpr bool compliant =
- std::is_trivially_default_constructible<T>::value ==
- is_trivially_default_constructible::value;
- static_assert(compliant || std::is_trivially_default_constructible<T>::value,
- "Not compliant with std::is_trivially_default_constructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_default_constructible<T>::value,
- "Not compliant with std::is_trivially_default_constructible; "
- "Standard: true, Implementation: false");
-#endif // Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
// is_trivially_move_constructible()
-//
-// Determines whether the passed type `T` is trivially move constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_move_constructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `T obj(declval<T>());` needs to be well-formed and not call any
-// nontrivial operation. Nontrivially destructible types will cause the
-// expression to be nontrivial.
-template <typename T>
-struct is_trivially_move_constructible
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
- : std::is_trivially_move_constructible<T> {
-#else
- : std::conditional<
- std::is_object<T>::value && !std::is_array<T>::value,
- type_traits_internal::IsTriviallyMoveConstructibleObject<T>,
- std::is_reference<T>>::type::type {
-#endif
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
- !defined( \
- Y_ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
- static constexpr bool compliant =
- std::is_trivially_move_constructible<T>::value ==
- is_trivially_move_constructible::value;
- static_assert(compliant || std::is_trivially_move_constructible<T>::value,
- "Not compliant with std::is_trivially_move_constructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_move_constructible<T>::value,
- "Not compliant with std::is_trivially_move_constructible; "
- "Standard: true, Implementation: false");
-#endif // Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
// is_trivially_copy_constructible()
-//
-// Determines whether the passed type `T` is trivially copy constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copy_constructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `T obj(declval<const T&>());` needs to be well-formed and not call any
-// nontrivial operation. Nontrivially destructible types will cause the
-// expression to be nontrivial.
-template <typename T>
-struct is_trivially_copy_constructible
- : std::conditional<
- std::is_object<T>::value && !std::is_array<T>::value,
- type_traits_internal::IsTriviallyCopyConstructibleObject<T>,
- std::is_lvalue_reference<T>>::type::type {
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
- !defined( \
- Y_ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
- static constexpr bool compliant =
- std::is_trivially_copy_constructible<T>::value ==
- is_trivially_copy_constructible::value;
- static_assert(compliant || std::is_trivially_copy_constructible<T>::value,
- "Not compliant with std::is_trivially_copy_constructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_copy_constructible<T>::value,
- "Not compliant with std::is_trivially_copy_constructible; "
- "Standard: true, Implementation: false");
-#endif // Y_ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
// is_trivially_move_assignable()
-//
-// Determines whether the passed type `T` is trivially move assignable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_move_assignable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `is_assignable<T, U>::value` is `true` if the expression
-// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
-// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
-// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
-// `is_trivially_assignable<T&, T>`.
-template <typename T>
-struct is_trivially_move_assignable
- : std::conditional<
- std::is_object<T>::value && !std::is_array<T>::value &&
- std::is_move_assignable<T>::value,
- std::is_move_assignable<type_traits_internal::SingleMemberUnion<T>>,
- type_traits_internal::IsTriviallyMoveAssignableReference<T>>::type::
- type {
-#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- private:
- static constexpr bool compliant =
- std::is_trivially_move_assignable<T>::value ==
- is_trivially_move_assignable::value;
- static_assert(compliant || std::is_trivially_move_assignable<T>::value,
- "Not compliant with std::is_trivially_move_assignable; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_move_assignable<T>::value,
- "Not compliant with std::is_trivially_move_assignable; "
- "Standard: true, Implementation: false");
-#endif // Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
-};
-
// is_trivially_copy_assignable()
//
-// Determines whether the passed type `T` is trivially copy assignable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copy_assignable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `is_assignable<T, U>::value` is `true` if the expression
-// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
-// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
-// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
-// `is_trivially_assignable<T&, const T&>`.
-template <typename T>
-struct is_trivially_copy_assignable
-#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- : std::is_trivially_copy_assignable<T> {
-#else
- : std::integral_constant<
- bool, __has_trivial_assign(typename std::remove_reference<T>::type) &&
- y_absl::is_copy_assignable<T>::value> {
-#endif
-#ifdef Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- private:
- static constexpr bool compliant =
- std::is_trivially_copy_assignable<T>::value ==
- is_trivially_copy_assignable::value;
- static_assert(compliant || std::is_trivially_copy_assignable<T>::value,
- "Not compliant with std::is_trivially_copy_assignable; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_copy_assignable<T>::value,
- "Not compliant with std::is_trivially_copy_assignable; "
- "Standard: true, Implementation: false");
-#endif // Y_ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
-};
+// Historical note: Abseil once provided implementations of these type traits
+// for platforms that lacked full support. New code should prefer to use the
+// std variants.
+//
+// See the documentation for the STL <type_traits> header for more information:
+// https://en.cppreference.com/w/cpp/header/type_traits
+using std::is_copy_assignable;
+using std::is_move_assignable;
+using std::is_trivially_copy_assignable;
+using std::is_trivially_copy_constructible;
+using std::is_trivially_default_constructible;
+using std::is_trivially_destructible;
+using std::is_trivially_move_assignable;
+using std::is_trivially_move_constructible;
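Since the y_absl names are now plain using-declarations, they agree with the std traits by construction; a minimal check (illustrative only):

```cpp
#include <type_traits>

#include "y_absl/meta/type_traits.h"

// With the old workarounds gone, the two spellings are interchangeable.
static_assert(y_absl::is_trivially_destructible<int>::value ==
                  std::is_trivially_destructible<int>::value,
              "y_absl trait is an alias of the std trait");

int main() { return 0; }
```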
#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L
template <typename T>
@@ -532,55 +230,6 @@ template <typename T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
-namespace type_traits_internal {
-// is_trivially_copyable()
-//
-// Determines whether the passed type `T` is trivially copyable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copyable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). We use the C++17 definition
-// of TriviallyCopyable.
-//
-// NOTE: `is_trivially_copyable<T>::value` is `true` if all of T's copy/move
-// constructors/assignment operators are trivial or deleted, T has at least
-// one non-deleted copy/move constructor/assignment operator, and T is trivially
-// destructible. Arrays of trivially copyable types are trivially copyable.
-//
-// We expose this metafunction only for internal use within y_absl.
-
-#if defined(Y_ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE)
-template <typename T>
-struct is_trivially_copyable : std::is_trivially_copyable<T> {};
-#else
-template <typename T>
-class is_trivially_copyable_impl {
- using ExtentsRemoved = typename std::remove_all_extents<T>::type;
- static constexpr bool kIsCopyOrMoveConstructible =
- std::is_copy_constructible<ExtentsRemoved>::value ||
- std::is_move_constructible<ExtentsRemoved>::value;
- static constexpr bool kIsCopyOrMoveAssignable =
- y_absl::is_copy_assignable<ExtentsRemoved>::value ||
- y_absl::is_move_assignable<ExtentsRemoved>::value;
-
- public:
- static constexpr bool kValue =
- (__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) &&
- (__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) &&
- (kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) &&
- is_trivially_destructible<ExtentsRemoved>::value &&
- // We need to check for this explicitly because otherwise we'll say
- // references are trivial copyable when compiled by MSVC.
- !std::is_reference<ExtentsRemoved>::value;
-};
-
-template <typename T>
-struct is_trivially_copyable
- : std::integral_constant<
- bool, type_traits_internal::is_trivially_copyable_impl<T>::kValue> {};
-#endif
-} // namespace type_traits_internal
-
// -----------------------------------------------------------------------------
// C++14 "_t" trait aliases
// -----------------------------------------------------------------------------
@@ -630,6 +279,7 @@ using remove_extent_t = typename std::remove_extent<T>::type;
template <typename T>
using remove_all_extents_t = typename std::remove_all_extents<T>::type;
+Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
namespace type_traits_internal {
// This trick to retrieve a default alignment is necessary for our
// implementation of aligned_storage_t to be consistent with any
@@ -648,6 +298,7 @@ struct default_alignment_of_aligned_storage<
template <size_t Len, size_t Align = type_traits_internal::
default_alignment_of_aligned_storage<Len>::value>
using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
+Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
template <typename T>
using decay_t = typename std::decay<T>::type;
@@ -818,9 +469,14 @@ using swap_internal::StdSwapIsUnconstrained;
} // namespace type_traits_internal
// y_absl::is_trivially_relocatable<T>
-// Detects whether a type is "trivially relocatable" -- meaning it can be
-// relocated without invoking the constructor/destructor, using a form of move
-// elision.
+//
+// Detects whether a type is known to be "trivially relocatable" -- meaning it
+// can be relocated without invoking the constructor/destructor, using a form of
+// move elision.
+//
+// This trait is conservative, for backwards compatibility. If it's true then
+// the type is definitely trivially relocatable, but if it's false then the type
+// may or may not be.
//
// Example:
//
@@ -834,14 +490,33 @@ using swap_internal::StdSwapIsUnconstrained;
// Upstream documentation:
//
// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable
+
+// If the compiler offers a builtin that tells us the answer, we can use that.
+// This covers all of the cases in the fallback below, plus types that opt in
+// using e.g. [[clang::trivial_abi]].
//
-#if Y_ABSL_HAVE_BUILTIN(__is_trivially_relocatable)
+// Clang on Windows has the builtin, but it falsely claims types with a
+// user-provided destructor are trivial (http://b/275003464). So we opt out
+// there.
+//
+// TODO(b/275003464): remove the opt-out once the bug is fixed.
+//
+// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
+// work with NVCC either.
+#if Y_ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
+ !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \
+ !defined(__NVCC__)
template <class T>
struct is_trivially_relocatable
: std::integral_constant<bool, __is_trivially_relocatable(T)> {};
#else
+// Otherwise we use a fallback that detects only those types we can feasibly
+// detect. Any type that has trivial move-construction and destruction
+// operations is by definition trivially relocatable.
template <class T>
-struct is_trivially_relocatable : std::integral_constant<bool, false> {};
+struct is_trivially_relocatable
+ : y_absl::conjunction<y_absl::is_trivially_move_constructible<T>,
+ y_absl::is_trivially_destructible<T>> {};
#endif
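A short sketch of what the conjunction fallback above reports (the `Pod` type is invented for illustration):

```cpp
#include "y_absl/meta/type_traits.h"

struct Pod { int x; };  // trivially move-constructible and destructible

// True under both the compiler builtin and the conjunction fallback; types
// with user-provided special members may conservatively report false.
static_assert(y_absl::is_trivially_relocatable<Pod>::value,
              "Pod is trivially relocatable");

int main() { return 0; }
```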
// y_absl::is_constant_evaluated()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make
index 1660ae53fc..8235a4d395 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/meta/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20230125.3)
+VERSION(20230802.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230125.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230802.0.tar.gz)
PEERDIR(
contrib/restricted/abseil-cpp-tstring/y_absl/base
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
index 501c756da5..2d20293c90 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/bits.h
@@ -38,19 +38,19 @@
#include <limits>
#include <type_traits>
-#if (defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) || \
- (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+#include "y_absl/base/config.h"
+
+#if Y_ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
#include <bit>
#endif
#include "y_absl/base/attributes.h"
-#include "y_absl/base/config.h"
#include "y_absl/numeric/internal/bits.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
-
#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+
// rotating
template <class T>
Y_ABSL_MUST_USE_RESULT constexpr
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
index a054aad2c1..fa62f6ad50 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.cc
@@ -111,7 +111,7 @@ uint128 MakeUint128FromFloat(T v) {
return MakeUint128(0, static_cast<uint64_t>(v));
}
-#if defined(__clang__) && !defined(__SSE3__)
+#if defined(__clang__) && (__clang_major__ < 9) && !defined(__SSE3__)
// Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
// Casting from long double to uint64_t is miscompiled and drops bits.
// It is more work, so only use when we need the workaround.
@@ -131,7 +131,7 @@ uint128 MakeUint128FromFloat(long double v) {
return (static_cast<uint128>(w0) << 100) | (static_cast<uint128>(w1) << 50) |
static_cast<uint128>(w2);
}
-#endif // __clang__ && !__SSE3__
+#endif // __clang__ && (__clang_major__ < 9) && !__SSE3__
} // namespace
uint128::uint128(float v) : uint128(MakeUint128FromFloat(v)) {}
@@ -202,6 +202,10 @@ TString Uint128ToFormattedString(uint128 v, std::ios_base::fmtflags flags) {
} // namespace
+TString uint128::ToString() const {
+ return Uint128ToFormattedString(*this, std::ios_base::dec);
+}
+
std::ostream& operator<<(std::ostream& os, uint128 v) {
std::ios_base::fmtflags flags = os.flags();
TString rep = Uint128ToFormattedString(v, flags);
@@ -216,9 +220,9 @@ std::ostream& operator<<(std::ostream& os, uint128 v) {
} else if (adjustfield == std::ios::internal &&
(flags & std::ios::showbase) &&
(flags & std::ios::basefield) == std::ios::hex && v != 0) {
- rep.insert((size_t)2, count, os.fill());
+ rep.insert(size_t{2}, count, os.fill());
} else {
- rep.insert((size_t)0, count, os.fill());
+ rep.insert(size_t{0}, count, os.fill());
}
}
@@ -285,6 +289,14 @@ int128 operator%(int128 lhs, int128 rhs) {
}
#endif // Y_ABSL_HAVE_INTRINSIC_INT128
+TString int128::ToString() const {
+ TString rep;
+ if (Int128High64(*this) < 0) rep = "-";
+ rep.append(Uint128ToFormattedString(UnsignedAbsoluteValue(*this),
+ std::ios_base::dec));
+ return rep;
+}
+
std::ostream& operator<<(std::ostream& os, int128 v) {
std::ios_base::fmtflags flags = os.flags();
TString rep;
@@ -314,16 +326,16 @@ std::ostream& operator<<(std::ostream& os, int128 v) {
break;
case std::ios::internal:
if (print_as_decimal && (rep[0] == '+' || rep[0] == '-')) {
- rep.insert(1u, count, os.fill());
+ rep.insert(size_t{1}, count, os.fill());
} else if ((flags & std::ios::basefield) == std::ios::hex &&
(flags & std::ios::showbase) && v != 0) {
- rep.insert((size_t)2, count, os.fill());
+ rep.insert(size_t{2}, count, os.fill());
} else {
- rep.insert((size_t)0, count, os.fill());
+ rep.insert(size_t{0}, count, os.fill());
}
break;
default: // std::ios::right
- rep.insert((size_t)0, count, os.fill());
+ rep.insert(size_t{0}, count, os.fill());
break;
}
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
index 34c920a032..d90efdaece 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128.h
@@ -32,6 +32,7 @@
#include <cstring>
#include <iosfwd>
#include <limits>
+#include <util/generic/string.h>
#include <utility>
#include "y_absl/base/config.h"
@@ -119,8 +120,8 @@ class
#ifdef Y_ABSL_HAVE_INTRINSIC_INT128
constexpr uint128(__int128 v); // NOLINT(runtime/explicit)
constexpr uint128(unsigned __int128 v); // NOLINT(runtime/explicit)
-#endif // Y_ABSL_HAVE_INTRINSIC_INT128
- constexpr uint128(int128 v); // NOLINT(runtime/explicit)
+#endif // Y_ABSL_HAVE_INTRINSIC_INT128
+ constexpr uint128(int128 v); // NOLINT(runtime/explicit)
explicit uint128(float v);
explicit uint128(double v);
explicit uint128(long double v);
@@ -217,9 +218,17 @@ class
return H::combine(std::move(h), Uint128High64(v), Uint128Low64(v));
}
+ // Support for y_absl::StrCat() etc.
+ template <typename Sink>
+ friend void AbslStringify(Sink& sink, uint128 v) {
+ sink.Append(v.ToString());
+ }
+
private:
constexpr uint128(uint64_t high, uint64_t low);
+ TString ToString() const;
+
// TODO(strel) Update implementation to use __int128 once all users of
// uint128 are fixed to not depend on alignof(uint128) == 8. Also add
// alignas(16) to class definition to keep alignment consistent across
@@ -286,9 +295,9 @@ class numeric_limits<y_absl::uint128> {
#endif // Y_ABSL_HAVE_INTRINSIC_INT128
static constexpr bool tinyness_before = false;
- static constexpr y_absl::uint128 (min)() { return 0; }
+ static constexpr y_absl::uint128(min)() { return 0; }
static constexpr y_absl::uint128 lowest() { return 0; }
- static constexpr y_absl::uint128 (max)() { return y_absl::Uint128Max(); }
+ static constexpr y_absl::uint128(max)() { return y_absl::Uint128Max(); }
static constexpr y_absl::uint128 epsilon() { return 0; }
static constexpr y_absl::uint128 round_error() { return 0; }
static constexpr y_absl::uint128 infinity() { return 0; }
@@ -454,9 +463,17 @@ class int128 {
return H::combine(std::move(h), Int128High64(v), Int128Low64(v));
}
+ // Support for y_absl::StrCat() etc.
+ template <typename Sink>
+ friend void AbslStringify(Sink& sink, int128 v) {
+ sink.Append(v.ToString());
+ }
+
private:
constexpr int128(int64_t high, uint64_t low);
+ TString ToString() const;
+
#if defined(Y_ABSL_HAVE_INTRINSIC_INT128)
__int128 v_;
#else // Y_ABSL_HAVE_INTRINSIC_INT128
@@ -521,9 +538,9 @@ class numeric_limits<y_absl::int128> {
#endif // Y_ABSL_HAVE_INTRINSIC_INT128
static constexpr bool tinyness_before = false;
- static constexpr y_absl::int128 (min)() { return y_absl::Int128Min(); }
+ static constexpr y_absl::int128(min)() { return y_absl::Int128Min(); }
static constexpr y_absl::int128 lowest() { return y_absl::Int128Min(); }
- static constexpr y_absl::int128 (max)() { return y_absl::Int128Max(); }
+ static constexpr y_absl::int128(max)() { return y_absl::Int128Max(); }
static constexpr y_absl::int128 epsilon() { return 0; }
static constexpr y_absl::int128 round_error() { return 0; }
static constexpr y_absl::int128 infinity() { return 0; }
@@ -561,9 +578,7 @@ inline uint128& uint128::operator=(unsigned long v) {
}
// NOLINTNEXTLINE(runtime/int)
-inline uint128& uint128::operator=(long long v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(long long v) { return *this = uint128(v); }
// NOLINTNEXTLINE(runtime/int)
inline uint128& uint128::operator=(unsigned long long v) {
@@ -571,18 +586,14 @@ inline uint128& uint128::operator=(unsigned long long v) {
}
#ifdef Y_ABSL_HAVE_INTRINSIC_INT128
-inline uint128& uint128::operator=(__int128 v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(__int128 v) { return *this = uint128(v); }
inline uint128& uint128::operator=(unsigned __int128 v) {
return *this = uint128(v);
}
#endif // Y_ABSL_HAVE_INTRINSIC_INT128
-inline uint128& uint128::operator=(int128 v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(int128 v) { return *this = uint128(v); }
// Arithmetic operators.
@@ -637,8 +648,7 @@ constexpr uint64_t Uint128High64(uint128 v) { return v.hi_; }
#if defined(Y_ABSL_IS_LITTLE_ENDIAN)
-constexpr uint128::uint128(uint64_t high, uint64_t low)
- : lo_{low}, hi_{high} {}
+constexpr uint128::uint128(uint64_t high, uint64_t low) : lo_{low}, hi_{high} {}
constexpr uint128::uint128(int v)
: lo_{static_cast<uint64_t>(v)},
@@ -670,8 +680,7 @@ constexpr uint128::uint128(int128 v)
#elif defined(Y_ABSL_IS_BIG_ENDIAN)
-constexpr uint128::uint128(uint64_t high, uint64_t low)
- : hi_{high}, lo_{low} {}
+constexpr uint128::uint128(uint64_t high, uint64_t low) : hi_{high}, lo_{low} {}
constexpr uint128::uint128(int v)
: hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0},
@@ -817,13 +826,9 @@ constexpr bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
// Unary operators.
-constexpr inline uint128 operator+(uint128 val) {
- return val;
-}
+constexpr inline uint128 operator+(uint128 val) { return val; }
-constexpr inline int128 operator+(int128 val) {
- return val;
-}
+constexpr inline int128 operator+(int128 val) { return val; }
constexpr uint128 operator-(uint128 val) {
#if defined(Y_ABSL_HAVE_INTRINSIC_INT128)
@@ -906,7 +911,7 @@ constexpr uint128 operator<<(uint128 lhs, int amount) {
#else
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
- return amount >= 64 ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0)
+ return amount >= 64 ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0)
: amount == 0 ? lhs
: MakeUint128((Uint128High64(lhs) << amount) |
(Uint128Low64(lhs) >> (64 - amount)),
@@ -920,7 +925,7 @@ constexpr uint128 operator>>(uint128 lhs, int amount) {
#else
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
- return amount >= 64 ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64))
+ return amount >= 64 ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64))
: amount == 0 ? lhs
: MakeUint128(Uint128High64(lhs) >> amount,
(Uint128Low64(lhs) >> amount) |
@@ -1042,27 +1047,19 @@ constexpr int128 MakeInt128(int64_t high, uint64_t low) {
}
// Assignment from integer types.
-inline int128& int128::operator=(int v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(int v) { return *this = int128(v); }
-inline int128& int128::operator=(unsigned int v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(unsigned int v) { return *this = int128(v); }
inline int128& int128::operator=(long v) { // NOLINT(runtime/int)
return *this = int128(v);
}
// NOLINTNEXTLINE(runtime/int)
-inline int128& int128::operator=(unsigned long v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(unsigned long v) { return *this = int128(v); }
// NOLINTNEXTLINE(runtime/int)
-inline int128& int128::operator=(long long v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(long long v) { return *this = int128(v); }
// NOLINTNEXTLINE(runtime/int)
inline int128& int128::operator=(unsigned long long v) {
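
Reviewer note on the AbslStringify hook added above: it is a sink-based extension point, where a type exposes a templated friend and any string routine that supplies an object with an Append() method can format it. A minimal standalone sketch of the shape (the toy StringSink and Point below are illustrative names, standard library only, not the library's actual sink):

#include <iostream>
#include <string>

// A toy sink; the real y_absl sinks expose a compatible Append() surface.
struct StringSink {
  std::string out;
  void Append(const std::string& s) { out += s; }
};

struct Point {
  int x, y;
  // Types opt in by providing a templated friend; the formatter works with
  // any sink type, so the type stays decoupled from the string library.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    sink.Append(std::to_string(p.x));
    sink.Append(",");
    sink.Append(std::to_string(p.y));
  }
};

int main() {
  StringSink sink;
  AbslStringify(sink, Point{3, 4});  // found via ADL, as StrCat() would do
  std::cout << sink.out << "\n";     // prints: 3,4
}

Because the friend is a template over the sink, int128 gains StrCat() support without depending on str_format internals; the private ToString() above supplies the actual digits.
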
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc
index 55f2026785..7133611d8f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_have_intrinsic.inc
@@ -162,9 +162,6 @@ inline int128::operator long double() const {
}
#else // Clang on PowerPC
-// Forward declaration for conversion operators to floating point types.
-constexpr int128 operator-(int128 v);
-constexpr bool operator!=(int128 lhs, int128 rhs);
inline int128::operator float() const {
// We must convert the absolute value and then negate as needed, because
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc
index 8354bd5615..e41d2a55e6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/int128_no_intrinsic.inc
@@ -23,8 +23,7 @@ constexpr int64_t Int128High64(int128 v) { return v.hi_; }
#if defined(Y_ABSL_IS_LITTLE_ENDIAN)
-constexpr int128::int128(int64_t high, uint64_t low) :
- lo_(low), hi_(high) {}
+constexpr int128::int128(int64_t high, uint64_t low) : lo_(low), hi_(high) {}
constexpr int128::int128(int v)
: lo_{static_cast<uint64_t>(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {}
@@ -44,8 +43,7 @@ constexpr int128::int128(uint128 v)
#elif defined(Y_ABSL_IS_BIG_ENDIAN)
-constexpr int128::int128(int64_t high, uint64_t low) :
- hi_{high}, lo_{low} {}
+constexpr int128::int128(int64_t high, uint64_t low) : hi_{high}, lo_{low} {}
constexpr int128::int128(int v)
: hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast<uint64_t>(v)} {}
@@ -279,33 +277,52 @@ constexpr int128 operator^(int128 lhs, int128 rhs) {
}
constexpr int128 operator<<(int128 lhs, int amount) {
- // int64_t shifts of >= 64 are undefined, so we need some special-casing.
- return amount >= 64
- ? MakeInt128(
- static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)), 0)
- : amount == 0
- ? lhs
- : MakeInt128(
- (Int128High64(lhs) << amount) |
- static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
- Int128Low64(lhs) << amount);
+ // int64_t shifts of >= 63 are undefined, so we need some special-casing.
+ assert(amount >= 0 && amount < 127);
+ if (amount <= 0) {
+ return lhs;
+ } else if (amount < 63) {
+ return MakeInt128(
+ (Int128High64(lhs) << amount) |
+ static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
+ Int128Low64(lhs) << amount);
+ } else if (amount == 63) {
+ return MakeInt128(((Int128High64(lhs) << 32) << 31) |
+ static_cast<int64_t>(Int128Low64(lhs) >> 1),
+ (Int128Low64(lhs) << 32) << 31);
+ } else if (amount == 127) {
+ return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << 63), 0);
+ } else if (amount > 127) {
+ return MakeInt128(0, 0);
+ } else {
+ // amount >= 64 && amount < 127
+ return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)),
+ 0);
+ }
}
constexpr int128 operator>>(int128 lhs, int amount) {
- // int64_t shifts of >= 64 are undefined, so we need some special-casing.
- // The (Int128High64(lhs) >> 32) >> 32 "trick" causes the the most significant
- // int64 to be inititialized with all zeros or all ones correctly. It takes
- // into account whether the number is negative or positive, and whether the
- // current architecture does arithmetic or logical right shifts for negative
- // numbers.
- return amount >= 64
- ? MakeInt128(
- (Int128High64(lhs) >> 32) >> 32,
- static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)))
- : amount == 0
- ? lhs
- : MakeInt128(Int128High64(lhs) >> amount,
- (Int128Low64(lhs) >> amount) |
- (static_cast<uint64_t>(Int128High64(lhs))
- << (64 - amount)));
+ // int64_t shifts of >= 63 are undefined, so we need some special-casing.
+ assert(amount >= 0 && amount < 127);
+ if (amount <= 0) {
+ return lhs;
+ } else if (amount < 63) {
+ return MakeInt128(
+ Int128High64(lhs) >> amount,
+ Int128Low64(lhs) >> amount | static_cast<uint64_t>(Int128High64(lhs))
+ << (64 - amount));
+ } else if (amount == 63) {
+ return MakeInt128((Int128High64(lhs) >> 32) >> 31,
+ static_cast<uint64_t>(Int128High64(lhs) << 1) |
+ (Int128Low64(lhs) >> 32) >> 31);
+
+ } else if (amount >= 127) {
+ return MakeInt128((Int128High64(lhs) >> 32) >> 31,
+ static_cast<uint64_t>((Int128High64(lhs) >> 32) >> 31));
+ } else {
+ // amount >= 64 && amount < 127
+ return MakeInt128(
+ (Int128High64(lhs) >> 32) >> 31,
+ static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)));
+ }
}
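
The rewritten shift operators above avoid per-word shifts of 64 or more bits, which are undefined for int64_t/uint64_t. A standalone, UB-free sketch of the same two-word decomposition for the unsigned case (the signed hunk additionally special-cases amount == 63 and sign extension):

#include <cstdint>
#include <cstdio>

struct U128 { uint64_t hi, lo; };

// Left shift by 0 <= amount < 128 without shifting any uint64_t by >= 64,
// which would be undefined behavior.
constexpr U128 Shl(U128 v, int amount) {
  if (amount == 0) return v;
  if (amount >= 64) return U128{v.lo << (amount - 64), 0};
  return U128{(v.hi << amount) | (v.lo >> (64 - amount)), v.lo << amount};
}

int main() {
  constexpr U128 one{0, 1};
  static_assert(Shl(one, 1).lo == 2, "small shift stays in the low word");
  static_assert(Shl(one, 64).hi == 1 && Shl(one, 64).lo == 0, "crosses words");
  static_assert(Shl(one, 127).hi == uint64_t{1} << 63, "lands on the top bit");
  std::puts("shift decomposition ok");
}
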
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/distribution_test_util.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/distribution_test_util.cc
index aab20d9206..ca8d183707 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/distribution_test_util.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/distribution_test_util.cc
@@ -213,7 +213,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
double result = 1.;
int ns = static_cast<int>(q + xc * psq);
- // Use the soper reduction forumla.
+ // Use the Soper reduction formula.
double rx = (ns == 0) ? x : x / xc;
double temp = q - ai;
for (;;) {
@@ -236,7 +236,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
}
}
- // NOTE: See also TOMS Alogrithm 708.
+ // NOTE: See also TOMS Algorithm 708.
// http://www.netlib.org/toms/index.html
//
// NOTE: The NWSC library also includes BRATIO / ISUBX (p87)
@@ -247,7 +247,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
// https://www.jstor.org/stable/2346798?read-now=1&seq=4#page_scan_tab_contents
// https://www.jstor.org/stable/2346887?seq=1#page_scan_tab_contents
//
-// XINBTA(p, q, beta, alhpa)
+// XINBTA(p, q, beta, alpha)
// p: the value of the parameter p.
// q: the value of the parameter q.
// beta: the value of ln B(p, q)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h
index 9827d1899e..5d4cebebd6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/fast_uniform_bits.h
@@ -57,9 +57,10 @@ constexpr UIntType IntegerLog2(UIntType n) {
// `PowerOfTwoVariate(urbg)`.
template <typename URBG>
constexpr size_t NumBits() {
- return RangeSize<URBG>() == 0
- ? std::numeric_limits<typename URBG::result_type>::digits
- : IntegerLog2(RangeSize<URBG>());
+ return static_cast<size_t>(
+ RangeSize<URBG>() == 0
+ ? std::numeric_limits<typename URBG::result_type>::digits
+ : IntegerLog2(RangeSize<URBG>()));
}
// Given a shift value `n`, constructs a mask with exactly the low `n` bits set.
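
The cast above only fixes a sign-conversion warning, but the mask helper that the comment introduces rests on a guard worth spelling out: shifting a 64-bit value by its full width is undefined, so a "low n bits set" mask needs a special case. A hedged standalone sketch (LowBitsMask is an illustrative name, not the library's helper):

#include <cstdint>
#include <limits>

constexpr uint64_t LowBitsMask(int n) {
  // n may equal the type's bit width, where `1 << n` would be UB.
  return n >= std::numeric_limits<uint64_t>::digits
             ? ~uint64_t{0}
             : (uint64_t{1} << n) - 1;
}

static_assert(LowBitsMask(0) == 0, "no bits");
static_assert(LowBitsMask(8) == 0xFF, "one byte");
static_assert(LowBitsMask(64) == ~uint64_t{0}, "full width, no UB");

int main() {}
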
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/generate_real.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/generate_real.h
index c2405e93ae..33942a786a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/generate_real.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/generate_real.h
@@ -78,7 +78,7 @@ inline RealType GenerateRealFromBits(uint64_t bits, int exp_bias = 0) {
"GenerateRealFromBits must be parameterized by either float or double.");
static_assert(sizeof(uint_type) == sizeof(real_type),
- "Mismatched unsinged and real types.");
+ "Mismatched unsigned and real types.");
static_assert((std::numeric_limits<real_type>::is_iec559 &&
std::numeric_limits<real_type>::radix == 2),
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/platform.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/platform.h
index a24802011b..843355b083 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/platform.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/platform.h
@@ -131,7 +131,7 @@
// Y_ABSL_RANDOM_INTERNAL_AES_DISPATCH indicates whether the currently active
// platform has, or should use run-time dispatch for selecting the
-// acclerated Randen implementation.
+// accelerated Randen implementation.
#define Y_ABSL_RANDOM_INTERNAL_AES_DISPATCH 0
#if defined(Y_ABSL_ARCH_X86_64)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect.cc
index e925ad35ed..3b385d6252 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_detect.cc
@@ -45,6 +45,10 @@
#if defined(Y_ABSL_INTERNAL_USE_X86_CPUID)
#if defined(_WIN32) || defined(_WIN64)
#include <intrin.h> // NOLINT(build/include_order)
+#elif Y_ABSL_HAVE_BUILTIN(__cpuid)
+// MSVC-equivalent __cpuid intrinsic declaration for clang-like compilers
+// for non-Windows build environments.
+extern void __cpuid(int[4], int);
#else
// MSVC-equivalent __cpuid intrinsic function.
static void __cpuid(int cpu_info[4], int info_type) {
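
The new branch above gives clang-like non-Windows builds the MSVC-style __cpuid entry point used for run-time dispatch. A hedged sketch of the same detection via GCC/Clang's <cpuid.h> (x86 only; CPUID leaf 1 reports AES-NI support in ECX bit 25):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  // __get_cpuid returns 0 if the requested leaf is unsupported.
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;
  bool has_aes = (ecx >> 25) & 1u;  // CPUID.1:ECX bit 25 = AESNI
  std::printf("AES-NI: %s\n", has_aes ? "yes" : "no");
}
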
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_engine.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_engine.h
index 5e3e3f48f9..f3fdd7e40d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_engine.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_engine.h
@@ -142,7 +142,7 @@ class alignas(8) randen_engine {
// The Randen paper suggests preferentially initializing even-numbered
// 128-bit vectors of the randen state (there are 16 such vectors).
// The seed data is merged into the state offset by 128-bits, which
- // implies prefering seed bytes [16..31, ..., 208..223]. Since the
+ // implies preferring seed bytes [16..31, ..., 208..223]. Since the
// buffer is 32-bit values, we swap the corresponding buffer positions in
// 128-bit chunks.
size_t dst = kBufferSize;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes.cc
index ea099bb8e0..ce002a102a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/randen_hwaes.cc
@@ -31,7 +31,7 @@
// a hardware accelerated implementation of randen, or whether it
// will contain stubs that exit the process.
#if Y_ABSL_HAVE_ACCELERATED_AES
-// The following plaforms have implemented RandenHwAes.
+// The following platforms have implemented RandenHwAes.
#if defined(Y_ABSL_ARCH_X86_64) || defined(Y_ABSL_ARCH_X86_32) || \
defined(Y_ABSL_ARCH_PPC) || defined(Y_ABSL_ARCH_ARM) || \
defined(Y_ABSL_ARCH_AARCH64)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/uniform_helper.h b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/uniform_helper.h
index 774cc68f87..249decdb02 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/uniform_helper.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/random/internal/uniform_helper.h
@@ -217,7 +217,7 @@ using UniformDistribution =
// UniformDistributionWrapper is used as the underlying distribution type
// by the y_absl::Uniform template function. It selects the proper Abseil
// uniform distribution and provides constructor overloads that match the
-// expected parameter order as well as adjusting distribtuion bounds based
+// expected parameter order as well as adjusting distribution bounds based
// on the tag.
template <typename NumType>
struct UniformDistributionWrapper : public UniformDistribution<NumType> {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
index 91ab0ebded..fc84f20aa9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/status_internal.h
@@ -66,6 +66,10 @@ struct StatusRep {
std::atomic<int32_t> ref;
y_absl::StatusCode code;
+
+ // As an internal implementation detail, we guarantee that if status.message()
+ // is non-empty, then the resulting string_view is null terminated.
+ // This is required to implement 'StatusMessageAsCStr(...)'
TString message;
std::unique_ptr<status_internal::Payloads> payloads;
};
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h
index 46469adc33..811afe05af 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/internal/statusor_internal.h
@@ -69,11 +69,8 @@ using IsConstructibleOrConvertibleOrAssignableFromStatusOr =
template <typename T, typename U>
struct IsDirectInitializationAmbiguous
: public y_absl::conditional_t<
- std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
- U>::value,
- std::false_type,
- IsDirectInitializationAmbiguous<
- T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>> {};
+ std::is_same<y_absl::remove_cvref_t<U>, U>::value, std::false_type,
+ IsDirectInitializationAmbiguous<T, y_absl::remove_cvref_t<U>>> {};
template <typename T, typename V>
struct IsDirectInitializationAmbiguous<T, y_absl::StatusOr<V>>
@@ -84,14 +81,11 @@ struct IsDirectInitializationAmbiguous<T, y_absl::StatusOr<V>>
template <typename T, typename U>
using IsDirectInitializationValid = y_absl::disjunction<
// Short circuits if T is basically U.
- std::is_same<T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<T, y_absl::remove_cvref_t<U>>,
y_absl::negation<y_absl::disjunction<
- std::is_same<y_absl::StatusOr<T>,
- y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
- std::is_same<y_absl::Status,
- y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
- std::is_same<y_absl::in_place_t,
- y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<y_absl::StatusOr<T>, y_absl::remove_cvref_t<U>>,
+ std::is_same<y_absl::Status, y_absl::remove_cvref_t<U>>,
+ std::is_same<y_absl::in_place_t, y_absl::remove_cvref_t<U>>,
IsDirectInitializationAmbiguous<T, U>>>>;
// This trait detects whether `StatusOr<T>::operator=(U&&)` is ambiguous, which
@@ -107,11 +101,8 @@ using IsDirectInitializationValid = y_absl::disjunction<
template <typename T, typename U>
struct IsForwardingAssignmentAmbiguous
: public y_absl::conditional_t<
- std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
- U>::value,
- std::false_type,
- IsForwardingAssignmentAmbiguous<
- T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>> {};
+ std::is_same<y_absl::remove_cvref_t<U>, U>::value, std::false_type,
+ IsForwardingAssignmentAmbiguous<T, y_absl::remove_cvref_t<U>>> {};
template <typename T, typename U>
struct IsForwardingAssignmentAmbiguous<T, y_absl::StatusOr<U>>
@@ -122,14 +113,11 @@ struct IsForwardingAssignmentAmbiguous<T, y_absl::StatusOr<U>>
template <typename T, typename U>
using IsForwardingAssignmentValid = y_absl::disjunction<
// Short circuits if T is basically U.
- std::is_same<T, y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<T, y_absl::remove_cvref_t<U>>,
y_absl::negation<y_absl::disjunction<
- std::is_same<y_absl::StatusOr<T>,
- y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
- std::is_same<y_absl::Status,
- y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
- std::is_same<y_absl::in_place_t,
- y_absl::remove_cv_t<y_absl::remove_reference_t<U>>>,
+ std::is_same<y_absl::StatusOr<T>, y_absl::remove_cvref_t<U>>,
+ std::is_same<y_absl::Status, y_absl::remove_cvref_t<U>>,
+ std::is_same<y_absl::in_place_t, y_absl::remove_cvref_t<U>>,
IsForwardingAssignmentAmbiguous<T, U>>>>;
class Helper {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
index e0a660b98d..73bf4d343b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.cc
@@ -80,10 +80,8 @@ std::ostream& operator<<(std::ostream& os, StatusCode code) {
namespace status_internal {
static y_absl::optional<size_t> FindPayloadIndexByUrl(
- const Payloads* payloads,
- y_absl::string_view type_url) {
- if (payloads == nullptr)
- return y_absl::nullopt;
+ const Payloads* payloads, y_absl::string_view type_url) {
+ if (payloads == nullptr) return y_absl::nullopt;
for (size_t i = 0; i < payloads->size(); ++i) {
if ((*payloads)[i].type_url == type_url) return i;
@@ -125,8 +123,7 @@ y_absl::optional<y_absl::Cord> Status::GetPayload(
const auto* payloads = GetPayloads();
y_absl::optional<size_t> index =
status_internal::FindPayloadIndexByUrl(payloads, type_url);
- if (index.has_value())
- return (*payloads)[index.value()].payload;
+ if (index.has_value()) return (*payloads)[index.value()].payload;
return y_absl::nullopt;
}
@@ -303,7 +300,7 @@ TString Status::ToStringSlow(StatusToStringMode mode) const {
y_absl::StrAppend(&text, y_absl::StatusCodeToString(code()), ": ", message());
const bool with_payload = (mode & StatusToStringMode::kWithPayload) ==
- StatusToStringMode::kWithPayload;
+ StatusToStringMode::kWithPayload;
if (with_payload) {
status_internal::StatusPayloadPrinter printer =
@@ -619,5 +616,12 @@ TString* MakeCheckFailString(const y_absl::Status* status,
} // namespace status_internal
+const char* StatusMessageAsCStr(const Status& status) {
+ // As an internal implementation detail, we guarantee that if status.message()
+ // is non-empty, then the resulting string_view is null terminated.
+ auto sv_message = status.message();
+ return sv_message.empty() ? "" : sv_message.data();
+}
+
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
index 6ed9d5d32a..b7f96e7c74 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/status.h
@@ -398,7 +398,7 @@ inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
//
// * It may provide more fine-grained semantic information about the error to
// facilitate actionable remedies.
-// * It may provide human-readable contexual information that is more
+// * It may provide human-readable contextual information that is more
// appropriate to display to an end user.
//
// Example:
@@ -538,7 +538,7 @@ class Status final {
//
// * It may provide more fine-grained semantic information about the error
// to facilitate actionable remedies.
- // * It may provide human-readable contexual information that is more
+ // * It may provide human-readable contextual information that is more
// appropriate to display to an end user.
//
// A payload consists of a [key,value] pair, where the key is a string
@@ -886,6 +886,15 @@ inline Status OkStatus() { return Status(); }
// message-less kCancelled errors are common in the infrastructure.
inline Status CancelledError() { return Status(y_absl::StatusCode::kCancelled); }
+// Retrieves a status's message as a null terminated C string. The lifetime of
+// this string is tied to the lifetime of the status object itself.
+//
+// If the status's message is empty, the empty string is returned.
+//
+// StatusMessageAsCStr exists for C support. Use `status.message()` in C++.
+const char* StatusMessageAsCStr(
+ const Status& status Y_ABSL_ATTRIBUTE_LIFETIME_BOUND);
+
Y_ABSL_NAMESPACE_END
} // namespace y_absl
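
A hedged usage sketch of the new C-interop entry point, assuming the y_absl headers from this tree; `status.message()` returns a string_view that is not by itself guaranteed to be null terminated, which is the gap this API fills:

#include <cstdio>
#include "y_absl/status/status.h"

// Stand-in for a C API that expects a null-terminated string.
extern "C" void legacy_log(const char* msg) { std::printf("%s\n", msg); }

int main() {
  y_absl::Status s = y_absl::InvalidArgumentError("bad flag");
  // The pointer is valid only while `s` is alive; an empty message yields "".
  legacy_log(y_absl::StatusMessageAsCStr(s));
}
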
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
index 428ca28a3b..a9c2a83ebc 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/status/statusor.h
@@ -146,7 +146,7 @@ class Y_ABSL_MUST_USE_RESULT StatusOr;
//
// y_absl::StatusOr<int> i = GetCount();
// if (i.ok()) {
-// updated_total += *i
+// updated_total += *i;
// }
//
// NOTE: using `y_absl::StatusOr<T>::value()` when no valid value is present will
@@ -411,7 +411,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
typename = typename std::enable_if<y_absl::conjunction<
std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
y_absl::disjunction<
- std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>, T>,
+ std::is_same<y_absl::remove_cvref_t<U>, T>,
y_absl::conjunction<
y_absl::negation<std::is_convertible<U&&, y_absl::Status>>,
y_absl::negation<internal_statusor::
@@ -444,8 +444,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
internal_statusor::IsDirectInitializationValid<T, U&&>,
std::is_constructible<T, U&&>, std::is_convertible<U&&, T>,
y_absl::disjunction<
- std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
- T>,
+ std::is_same<y_absl::remove_cvref_t<U>, T>,
y_absl::conjunction<
y_absl::negation<std::is_convertible<U&&, y_absl::Status>>,
y_absl::negation<
@@ -461,8 +460,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
y_absl::conjunction<
internal_statusor::IsDirectInitializationValid<T, U&&>,
y_absl::disjunction<
- std::is_same<y_absl::remove_cv_t<y_absl::remove_reference_t<U>>,
- T>,
+ std::is_same<y_absl::remove_cvref_t<U>, T>,
y_absl::conjunction<
y_absl::negation<std::is_constructible<y_absl::Status, U&&>>,
y_absl::negation<
@@ -584,7 +582,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// Reconstructs the inner value T in-place using the provided args, using the
// T(args...) constructor. Returns reference to the reconstructed `T`.
template <typename... Args>
- T& emplace(Args&&... args) {
+ T& emplace(Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ok()) {
this->Clear();
this->MakeValue(std::forward<Args>(args)...);
@@ -600,7 +598,8 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
y_absl::enable_if_t<
std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
int> = 0>
- T& emplace(std::initializer_list<U> ilist, Args&&... args) {
+ T& emplace(std::initializer_list<U> ilist,
+ Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ok()) {
this->Clear();
this->MakeValue(ilist, std::forward<Args>(args)...);
@@ -611,6 +610,21 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
return this->data_;
}
+ // StatusOr<T>::AssignStatus()
+ //
+ // Sets the status of `y_absl::StatusOr<T>` to the given non-ok status value.
+ //
+ // NOTE: We recommend using the constructor and `operator=` where possible.
+ // This method is intended for use in generic programming, to enable setting
+ // the status of a `StatusOr<T>` when `T` may be `Status`. In that case, the
+ // constructor and `operator=` would assign into the inner value of type
+ // `Status`, rather than status of the `StatusOr` (b/280392796).
+ //
+ // REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
+ // In optimized builds, passing y_absl::OkStatus() here will have the effect
+ // of passing y_absl::StatusCode::kInternal as a fallback.
+ using internal_statusor::StatusOrData<T>::AssignStatus;
+
private:
using internal_statusor::StatusOrData<T>::Assign;
template <typename U>
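
A hedged sketch of the generic-programming case AssignStatus() exists for, assuming the y_absl headers from this tree; with T = Status, the ordinary constructor and operator= target the contained value, as the comment above notes:

#include "y_absl/status/status.h"
#include "y_absl/status/statusor.h"

template <typename T>
void FailAll(y_absl::StatusOr<T>& slot) {
  // For T = y_absl::Status, plain assignment would write into the *contained*
  // Status value; AssignStatus always sets the StatusOr's own error state.
  slot.AssignStatus(y_absl::NotFoundError("not found"));
}

int main() {
  y_absl::StatusOr<int> a = 1;
  // in_place constructs the contained value, so this holds an OK Status.
  y_absl::StatusOr<y_absl::Status> b(y_absl::in_place, y_absl::OkStatus());
  FailAll(a);
  FailAll(b);
  return a.ok() || b.ok();  // both are now in the error state, returns 0
}
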
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc
index e8e7332a2a..47d07e6023 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/ascii.cc
@@ -14,6 +14,10 @@
#include "y_absl/strings/ascii.h"
+#include <climits>
+#include <cstring>
+#include <util/generic/string.h>
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace ascii_internal {
@@ -153,18 +157,62 @@ Y_ABSL_DLL const char kToUpper[256] = {
};
// clang-format on
+template <bool ToUpper>
+constexpr void AsciiStrCaseFold(char* p, char* end) {
+ // The upper- and lowercase versions of ASCII characters differ by only 1 bit.
+ // When we need to flip the case, we can xor with this bit to achieve the
+ // desired result. Note that the choice of 'a' and 'A' here is arbitrary. We
+ // could have chosen 'z' and 'Z', or any other pair of characters as they all
+ // have the same single bit difference.
+ constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';
+
+ constexpr char ch_a = ToUpper ? 'a' : 'A';
+ constexpr char ch_z = ToUpper ? 'z' : 'Z';
+ for (; p < end; ++p) {
+ unsigned char v = static_cast<unsigned char>(*p);
+ // We use & instead of && to ensure this always stays branchless
+ // We use static_cast<int> to suppress -Wbitwise-instead-of-logical
+ bool is_in_range = static_cast<bool>(static_cast<int>(ch_a <= v) &
+ static_cast<int>(v <= ch_z));
+ v ^= is_in_range ? kAsciiCaseBitFlip : 0;
+ *p = static_cast<char>(v);
+ }
+}
+
+static constexpr size_t ValidateAsciiCasefold() {
+ constexpr size_t num_chars = 1 + CHAR_MAX - CHAR_MIN;
+ size_t incorrect_index = 0;
+ char lowered[num_chars] = {};
+ char uppered[num_chars] = {};
+ for (unsigned int i = 0; i < num_chars; ++i) {
+ uppered[i] = lowered[i] = static_cast<char>(i);
+ }
+ AsciiStrCaseFold<false>(&lowered[0], &lowered[num_chars]);
+ AsciiStrCaseFold<true>(&uppered[0], &uppered[num_chars]);
+ for (size_t i = 0; i < num_chars; ++i) {
+ const char ch = static_cast<char>(i),
+ ch_upper = ('a' <= ch && ch <= 'z' ? 'A' + (ch - 'a') : ch),
+ ch_lower = ('A' <= ch && ch <= 'Z' ? 'a' + (ch - 'A') : ch);
+ if (uppered[i] != ch_upper || lowered[i] != ch_lower) {
+ incorrect_index = i > 0 ? i : num_chars;
+ break;
+ }
+ }
+ return incorrect_index;
+}
+
+static_assert(ValidateAsciiCasefold() == 0, "error in case conversion");
+
} // namespace ascii_internal
void AsciiStrToLower(TString* s) {
- for (auto& ch : *s) {
- ch = y_absl::ascii_tolower(static_cast<unsigned char>(ch));
- }
+ char* p = &(*s)[0]; // Guaranteed to be valid for empty strings
+ return ascii_internal::AsciiStrCaseFold<false>(p, p + s->size());
}
void AsciiStrToUpper(TString* s) {
- for (auto& ch : *s) {
- ch = y_absl::ascii_toupper(static_cast<unsigned char>(ch));
- }
+ char* p = &(*s)[0]; // Guaranteed to be valid for empty strings
+ return ascii_internal::AsciiStrCaseFold<true>(p, p + s->size());
}
void RemoveExtraAsciiWhitespace(TString* str) {
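
The case-folding rewrite above exploits the fact that upper- and lowercase ASCII letters differ in exactly one bit, so a conditional xor replaces a table lookup. A standalone sketch of the branchless flip on std::string (kFlip mirrors kAsciiCaseBitFlip from the hunk):

#include <cstdio>
#include <string>

void ToUpperAscii(std::string& s) {
  constexpr unsigned char kFlip = 'a' ^ 'A';  // 0x20, the lone case bit
  for (char& c : s) {
    unsigned char v = static_cast<unsigned char>(c);
    // & instead of && keeps the range test branchless, as in the hunk.
    bool in_range = static_cast<bool>(static_cast<int>('a' <= v) &
                                      static_cast<int>(v <= 'z'));
    v ^= in_range ? kFlip : 0;
    c = static_cast<char>(v);
  }
}

int main() {
  std::string s = "Hello, world! 123";
  ToUpperAscii(s);
  std::printf("%s\n", s.c_str());  // HELLO, WORLD! 123
}
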
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc
index 67134e6181..a31acf1f3c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.cc
@@ -21,6 +21,7 @@
#include <limits>
#include "y_absl/base/casts.h"
+#include "y_absl/base/config.h"
#include "y_absl/numeric/bits.h"
#include "y_absl/numeric/int128.h"
#include "y_absl/strings/internal/charconv_bigint.h"
@@ -118,10 +119,17 @@ struct FloatTraits<double> {
static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
static double MakeNan(const char* tagp) {
+#if Y_ABSL_HAVE_BUILTIN(__builtin_nan)
+ // Use __builtin_nan() if available since it has a fix for
+ // https://bugs.llvm.org/show_bug.cgi?id=37778
+ // std::nan may use the glibc implementation.
+ return __builtin_nan(tagp);
+#else
// Support nan no matter which namespace it's in. Some platforms
// incorrectly don't put it in namespace std.
using namespace std; // NOLINT
return nan(tagp);
+#endif
}
// Builds a nonzero floating point number out of the provided parts.
@@ -184,10 +192,17 @@ struct FloatTraits<float> {
static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
static float MakeNan(const char* tagp) {
+#if Y_ABSL_HAVE_BUILTIN(__builtin_nanf)
+ // Use __builtin_nanf() if available since it has a fix for
+ // https://bugs.llvm.org/show_bug.cgi?id=37778
+ // std::nanf may use the glibc implementation.
+ return __builtin_nanf(tagp);
+#else
// Support nanf no matter which namespace it's in. Some platforms
// incorrectly don't put it in namespace std.
using namespace std; // NOLINT
- return nanf(tagp);
+ return std::nanf(tagp);
+#endif
}
static float Make(mantissa_t mantissa, int exponent, bool sign) {
@@ -203,7 +218,8 @@ struct FloatTraits<float> {
if (mantissa > kMantissaMask) {
// Normal value.
// Adjust by 127 for the exponent representation bias, and an additional
- // 23 due to the implied decimal point in the IEEE mantissa represenation.
+ // 23 due to the implied decimal point in the IEEE mantissa
+ // representation.
flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1)
<< 23;
mantissa &= kMantissaMask;
@@ -349,7 +365,8 @@ bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
// https://bugs.llvm.org/show_bug.cgi?id=37778
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86113
constexpr ptrdiff_t kNanBufferSize = 128;
-#if defined(__GNUC__) || (defined(__clang__) && __clang_major__ < 7)
+#if (defined(__GNUC__) && !defined(__clang__)) || \
+ (defined(__clang__) && __clang_major__ < 7)
volatile char n_char_sequence[kNanBufferSize];
#else
char n_char_sequence[kNanBufferSize];
@@ -462,7 +479,7 @@ uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
// the low bit of `value` is set.
//
// In inexact mode, the nonzero error means the actual value is greater
- // than the halfway point and we must alway round up.
+ // than the halfway point and we must always round up.
if ((value & 1) == 1 || !input_exact) {
++value;
}
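
The MakeNan() changes prefer __builtin_nan/__builtin_nanf where available to sidestep the LLVM constant-folding bug cited above. A small standalone probe of NaN payload behavior; the exact payload bits produced from the tag are implementation-defined, so treat the output as informative only:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = std::nan("0x42");  // request a payload via the tag string
  uint64_t bits = 0;
  std::memcpy(&bits, &d, sizeof bits);  // inspect the raw representation
  std::printf("nan bits: 0x%016llx\n",
              static_cast<unsigned long long>(bits));
}
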
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h
index 9b63016e3f..46c5dc95ba 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/charconv.h
@@ -22,7 +22,7 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
-// Workalike compatibilty version of std::chars_format from C++17.
+// Workalike compatibility version of std::chars_format from C++17.
//
// This is an bitfield enumerator which can be passed to y_absl::from_chars to
// configure the string-to-float conversion.
@@ -48,7 +48,7 @@ struct from_chars_result {
std::errc ec;
};
-// Workalike compatibilty version of std::from_chars from C++17. Currently
+// Workalike compatibility version of std::from_chars from C++17. Currently
// this only supports the `double` and `float` types.
//
// This interface incorporates the proposed resolutions for library issues
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
index 78aebe711f..dd647a89f8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.cc
@@ -48,7 +48,6 @@
#include "y_absl/strings/internal/cordz_update_tracker.h"
#include "y_absl/strings/internal/resize_uninitialized.h"
#include "y_absl/strings/str_cat.h"
-#include "y_absl/strings/str_format.h"
#include "y_absl/strings/str_join.h"
#include "y_absl/strings/string_view.h"
@@ -795,7 +794,7 @@ int CompareChunks(y_absl::string_view* lhs, y_absl::string_view* rhs,
}
// This overload set computes comparison results from memcmp result. This
-// interface is used inside GenericCompare below. Differet implementations
+// interface is used inside GenericCompare below. Different implementations
// are specialized for int and bool. For int we clamp result to {-1, 0, 1}
// set. For bool we just interested in "value == 0".
template <typename ResultType>
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
index 872188a89a..2a81405c4a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord.h
@@ -110,9 +110,30 @@ enum class CordMemoryAccounting {
// Counts the *approximate* number of bytes held in full or in part by this
// Cord (which may not remain the same between invocations). Cords that share
// memory could each be "charged" independently for the same shared memory.
+ // See also comment on `kTotalMorePrecise` on internally shared memory.
kTotal,
// Counts the *approximate* number of bytes held in full or in part by this
+ // Cord for the distinct memory held by this cord. This option is similar
+ // to `kTotal`, except that if the cord has multiple references to the same
+ // memory, that memory is only counted once.
+ //
+ // For example:
+ // y_absl::Cord cord;
+ // cord.Append(some_other_cord);
+ // cord.Append(some_other_cord);
+ // // Counts `some_other_cord` twice:
+ // cord.EstimatedMemoryUsage(kTotal);
+ // // Counts `some_other_cord` once:
+ // cord.EstimatedMemoryUsage(kTotalMorePrecise);
+ //
+ // The `kTotalMorePrecise` number is more expensive to compute as it requires
+ // deduplicating all memory references. Applications should prefer to use
+ // `kFairShare` or `kTotal` unless they really need a more precise estimate
+ // on "how much memory is potentially held / kept alive by this cord?"
+ kTotalMorePrecise,
+
+ // Counts the *approximate* number of bytes held in full or in part by this
// Cord weighted by the sharing ratio of that data. For example, if some data
// edge is shared by 4 different Cords, then each cord is attributed 1/4th of
// the total memory usage as a 'fair share' of the total memory usage.
@@ -661,7 +682,7 @@ class Cord {
class CharRange {
public:
// Fulfill minimum c++ container requirements [container.requirements]
- // Theses (partial) container type definitions allow CharRange to be used
+ // These (partial) container type definitions allow CharRange to be used
// in various utilities expecting a subset of [container.requirements].
// For example, the below enables using `::testing::ElementsAre(...)`
using value_type = char;
@@ -1273,10 +1294,16 @@ inline size_t Cord::EstimatedMemoryUsage(
CordMemoryAccounting accounting_method) const {
size_t result = sizeof(Cord);
if (const y_absl::cord_internal::CordRep* rep = contents_.tree()) {
- if (accounting_method == CordMemoryAccounting::kFairShare) {
- result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
- } else {
- result += cord_internal::GetEstimatedMemoryUsage(rep);
+ switch (accounting_method) {
+ case CordMemoryAccounting::kFairShare:
+ result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
+ break;
+ case CordMemoryAccounting::kTotalMorePrecise:
+ result += cord_internal::GetMorePreciseMemoryUsage(rep);
+ break;
+ case CordMemoryAccounting::kTotal:
+ result += cord_internal::GetEstimatedMemoryUsage(rep);
+ break;
}
}
return result;
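
A hedged usage sketch of the new accounting mode, assuming the y_absl headers from this tree; exact figures vary with allocator and tree layout, but kTotalMorePrecise should not double-charge the subtree referenced twice:

#include <cstdio>
#include "y_absl/strings/cord.h"

int main() {
  // Long enough to be tree-backed rather than inlined in the Cord itself.
  y_absl::Cord shared("some reasonably long shared payload ..............");
  y_absl::Cord cord;
  cord.Append(shared);
  cord.Append(shared);  // second reference to the same underlying data
  std::printf("total:        %zu\n",
              cord.EstimatedMemoryUsage(y_absl::CordMemoryAccounting::kTotal));
  std::printf("more precise: %zu\n",
              cord.EstimatedMemoryUsage(
                  y_absl::CordMemoryAccounting::kTotalMorePrecise));
}
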
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc
index 9c925510cc..47e268f7a4 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.cc
@@ -16,6 +16,7 @@
#include <cstddef>
#include <cstdint>
+#include <unordered_set>
#include "y_absl/base/attributes.h"
#include "y_absl/base/config.h"
@@ -37,7 +38,7 @@ namespace cord_internal {
namespace {
// Accounting mode for analyzing memory usage.
-enum class Mode { kTotal, kFairShare };
+enum class Mode { kFairShare, kTotal, kTotalMorePrecise };
// CordRepRef holds a `const CordRep*` reference in rep, and depending on mode,
// holds a 'fraction' representing a cumulative inverse refcount weight.
@@ -62,6 +63,23 @@ struct RawUsage {
void Add(size_t size, CordRepRef<mode>) { total += size; }
};
+// Overloaded representation of RawUsage that tracks the set of objects
+// counted, and avoids double-counting objects referenced more than once
+// by the same Cord.
+template <>
+struct RawUsage<Mode::kTotalMorePrecise> {
+ size_t total = 0;
+ // TODO(b/289250880): Replace this with a flat_hash_set.
+ std::unordered_set<const CordRep*> counted;
+
+ void Add(size_t size, CordRepRef<Mode::kTotalMorePrecise> repref) {
+ if (counted.find(repref.rep) == counted.end()) {
+ counted.insert(repref.rep);
+ total += size;
+ }
+ }
+};
+
// Returns n / refcount avoiding a div for the common refcount == 1.
template <typename refcount_t>
double MaybeDiv(double d, refcount_t refcount) {
@@ -183,6 +201,10 @@ size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep) {
return GetEstimatedUsage<Mode::kFairShare>(rep);
}
+size_t GetMorePreciseMemoryUsage(const CordRep* rep) {
+ return GetEstimatedUsage<Mode::kTotalMorePrecise>(rep);
+}
+
} // namespace cord_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
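
The RawUsage specialization above is a pointer-deduplicating accumulator. A standalone sketch of the same pattern (Node and DedupUsage are illustrative names):

#include <cstddef>
#include <cstdio>
#include <unordered_set>

struct Node { size_t size; };

struct DedupUsage {
  size_t total = 0;
  std::unordered_set<const Node*> counted;
  void Add(const Node* n) {
    // insert().second is true only for the first occurrence of the pointer.
    if (counted.insert(n).second) total += n->size;
  }
};

int main() {
  Node shared{100};
  DedupUsage usage;
  usage.Add(&shared);
  usage.Add(&shared);  // second reference: not double-counted
  std::printf("%zu\n", usage.total);  // 100
}
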
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h
index 75f9216ae3..07e31dae77 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_analysis.h
@@ -31,6 +31,24 @@ namespace cord_internal {
size_t GetEstimatedMemoryUsage(const CordRep* rep);
// Returns the *approximate* number of bytes held in full or in part by this
+// Cord for the distinct memory held by this cord. This is similar to
+// `GetEstimatedMemoryUsage()`, except that if the cord has multiple references
+// to the same memory, that memory is only counted once.
+//
+// For example:
+// y_absl::Cord cord;
+// cord.Append(some_other_cord);
+// cord.Append(some_other_cord);
+// // Calls GetEstimatedMemoryUsage() and counts `some_other_cord` twice:
+// cord.EstimatedMemoryUsage(kTotal);
+// // Calls GetMorePreciseMemoryUsage() and counts `some_other_cord` once:
+// cord.EstimatedMemoryUsage(kTotalMorePrecise);
+//
+// This is more expensive than `GetEstimatedMemoryUsage()` as it requires
+// deduplicating all memory references.
+size_t GetMorePreciseMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
// CordRep weighted by the sharing ratio of that data. For example, if some data
// edge is shared by 4 different Cords, then each cord is attributed 1/4th of
// the total memory usage as a 'fair share' of the total memory usage.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h
index 1199ed0f3d..b8fe74f50f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/cord_buffer.h
@@ -160,7 +160,6 @@ class CordBuffer {
// for more information on buffer capacities and intended usage.
static CordBuffer CreateWithDefaultLimit(size_t capacity);
-
// CordBuffer::CreateWithCustomLimit()
//
// Creates a CordBuffer instance of the desired `capacity` rounded to an
@@ -336,7 +335,7 @@ class CordBuffer {
}
// Returns the available area of the internal SSO data
- y_absl::Span<char> long_available() {
+ y_absl::Span<char> long_available() const {
assert(!is_short());
const size_t length = long_rep.rep->length;
return y_absl::Span<char>(long_rep.rep->Data() + length,
@@ -460,9 +459,7 @@ inline constexpr size_t CordBuffer::MaximumPayload() {
}
inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
- // TODO(y_absl-team): Use std::min when C++11 support is dropped.
- return (kCustomLimit < block_size ? kCustomLimit : block_size) -
- cord_internal::kFlatOverhead;
+ return (std::min)(kCustomLimit, block_size) - cord_internal::kFlatOverhead;
}
inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc
index ce90d54fad..eb5f4bbb69 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.cc
@@ -443,6 +443,8 @@ void CEscapeAndAppendInternal(y_absl::string_view src, TString* dest) {
}
}
+// Reverses the mapping in Base64EscapeInternal; see that method's
+// documentation for details of the mapping.
bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
size_t szdest, const signed char* unbase64,
size_t* len) {
@@ -676,7 +678,10 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
return ok;
}
-// The arrays below were generated by the following code
+// The arrays below map base64-escaped characters back to their original values.
+// For the inverse case, see k(WebSafe)Base64Chars in the internal
+// escaping.cc.
+// These arrays were generated by the following inversion code:
// #include <sys/time.h>
// #include <stdlib.h>
// #include <string.h>
@@ -703,8 +708,8 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
// }
// }
//
-// where the value of "Base64[]" was replaced by one of the base-64 conversion
-// tables from the functions below.
+// where the value of "Base64[]" was replaced by one of k(WebSafe)Base64Chars
+// in the internal escaping.cc.
/* clang-format off */
constexpr signed char kUnBase64[] = {
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -777,9 +782,6 @@ constexpr signed char kUnWebSafeBase64[] = {
};
/* clang-format on */
-constexpr char kWebSafeBase64Chars[] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
template <typename String>
bool Base64UnescapeInternal(const char* src, size_t slen, String* dest,
const signed char* unbase64) {
@@ -880,30 +882,6 @@ TString Utf8SafeCHexEscape(y_absl::string_view src) {
return CEscapeInternal(src, true, true);
}
-// ----------------------------------------------------------------------
-// Base64Unescape() - base64 decoder
-// Base64Escape() - base64 encoder
-// WebSafeBase64Unescape() - Google's variation of base64 decoder
-// WebSafeBase64Escape() - Google's variation of base64 encoder
-//
-// Check out
-// https://datatracker.ietf.org/doc/html/rfc2045 for formal description, but
-// what we care about is that...
-// Take the encoded stuff in groups of 4 characters and turn each
-// character into a code 0 to 63 thus:
-// A-Z map to 0 to 25
-// a-z map to 26 to 51
-// 0-9 map to 52 to 61
-// +(- for WebSafe) maps to 62
-// /(_ for WebSafe) maps to 63
-// There will be four numbers, all less than 64 which can be represented
-// by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
-// Arrange the 6 digit binary numbers into three bytes as such:
-// aaaaaabb bbbbcccc ccdddddd
-// Equals signs (one or two) are used at the end of the encoded block to
-// indicate that the text was not an integer multiple of three bytes long.
-// ----------------------------------------------------------------------
-
bool Base64Unescape(y_absl::string_view src, TString* dest) {
return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
}
@@ -921,7 +899,7 @@ void Base64Escape(y_absl::string_view src, TString* dest) {
void WebSafeBase64Escape(y_absl::string_view src, TString* dest) {
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
- false, kWebSafeBase64Chars);
+ false, strings_internal::kWebSafeBase64Chars);
}
TString Base64Escape(y_absl::string_view src) {
@@ -936,7 +914,7 @@ TString WebSafeBase64Escape(y_absl::string_view src) {
TString dest;
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
- false, kWebSafeBase64Chars);
+ false, strings_internal::kWebSafeBase64Chars);
return dest;
}
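
The restored comments describe decode tables built by inverting the escape alphabet. A standalone C++17 sketch of that inversion for the standard alphabet (kChars and MakeUnBase64 are illustrative names; the library's tables were generated by the quoted program instead):

#include <array>
#include <cstdio>

constexpr char kChars[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

constexpr std::array<signed char, 256> MakeUnBase64() {
  std::array<signed char, 256> t{};
  for (auto& v : t) v = -1;  // -1 marks bytes that are not base64 digits
  for (int i = 0; i < 64; ++i) {
    t[static_cast<unsigned char>(kChars[i])] = static_cast<signed char>(i);
  }
  return t;
}

int main() {
  constexpr auto kUnBase64 = MakeUnBase64();
  // 'A' -> 0, '/' -> 63, '!' -> -1 (invalid input byte)
  std::printf("%d %d %d\n", kUnBase64['A'], kUnBase64['/'], kUnBase64['!']);
}
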
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h
index ab83ba5f97..3b5e0537b6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/escaping.h
@@ -121,7 +121,7 @@ TString Utf8SafeCHexEscape(y_absl::string_view src);
//
// Encodes a `src` string into a base64-encoded 'dest' string with padding
// characters. This function conforms with RFC 4648 section 4 (base64) and RFC
-// 2045. See also CalculateBase64EscapedLen().
+// 2045.
void Base64Escape(y_absl::string_view src, TString* dest);
TString Base64Escape(y_absl::string_view src);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc
index 924bea66e3..0f7f9a9983 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.cc
@@ -296,10 +296,8 @@ template <int max_words>
std::min(n / kLargePowerOfFiveStep, kLargestPowerOfFiveIndex);
if (first_pass) {
// just copy, rather than multiplying by 1
- std::copy(
- LargePowerOfFiveData(big_power),
- LargePowerOfFiveData(big_power) + LargePowerOfFiveSize(big_power),
- answer.words_);
+ std::copy_n(LargePowerOfFiveData(big_power),
+ LargePowerOfFiveSize(big_power), answer.words_);
answer.size_ = LargePowerOfFiveSize(big_power);
first_pass = false;
} else {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h
index 01fb544ec3..fe9a61f943 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/charconv_bigint.h
@@ -92,7 +92,7 @@ class BigUnsigned {
// numbers with this many decimal digits or fewer are representable by this
// type.
//
- // Analagous to std::numeric_limits<BigUnsigned>::digits10.
+ // Analogous to std::numeric_limits<BigUnsigned>::digits10.
static constexpr int Digits10() {
// 9975007/1035508 is very slightly less than log10(2**32).
return static_cast<uint64_t>(max_words) * 9975007 / 1035508;
@@ -121,7 +121,7 @@ class BigUnsigned {
++size_;
}
}
- std::fill(words_, words_ + word_shift, 0u);
+ std::fill_n(words_, word_shift, 0u);
}
}
@@ -197,7 +197,7 @@ class BigUnsigned {
}
void SetToZero() {
- std::fill(words_, words_ + size_, 0u);
+ std::fill_n(words_, size_, 0u);
size_ = 0;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
index 22a890c232..8c6e79a066 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.cc
@@ -33,7 +33,6 @@ Y_ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
kCordEnableRingBufferDefault);
Y_ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
kCordShallowSubcordsDefault);
-Y_ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
void LogFatalNodeType(CordRep* rep) {
Y_ABSL_INTERNAL_LOG(FATAL, y_absl::StrCat("Unexpected node type: ",
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
index c1f12f1428..edbc682e20 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_internal.h
@@ -69,12 +69,6 @@ enum CordFeatureDefaults {
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;
-// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
-// in debug assertions, and code that calls `IsValid()` explicitly. By default,
-// assertions should be relatively cheap and AssertValid() can easily lead to
-// O(n^2) complexity as recursive / full tree validation is O(n).
-extern std::atomic<bool> cord_btree_exhaustive_validation;
-
inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
@@ -163,20 +157,19 @@ class RefcountAndFlags {
// false will be visible to a thread that just observed this method returning
// false. Always returns false when the immortal bit is set.
inline bool Decrement() {
- int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
- assert(refcount > 0 || refcount & kImmortalFlag);
+ int32_t refcount = count_.load(std::memory_order_acquire);
+ assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag);
return refcount != kRefIncrement &&
(count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
- kRefcountMask) != kRefIncrement;
+ kHighRefcountMask) != 0;
}
// Same as Decrement but expect that refcount is greater than 1.
inline bool DecrementExpectHighRefcount() {
int32_t refcount =
- count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
- kRefcountMask;
- assert(refcount > 0 || refcount & kImmortalFlag);
- return refcount != kRefIncrement;
+ count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel);
+ assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag);
+ return (refcount & kHighRefcountMask) != 0;
}
// Returns the current reference count using acquire semantics.
@@ -220,6 +213,15 @@ class RefcountAndFlags {
// purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
// if the immortal bit is set.)
kRefcountMask = ~kReservedFlag,
+
+ // Bitmask to use when checking if refcount is equal to 1 and not
+ // immortal when decrementing the refcount. This masks out kRefIncrement and
+ // all flags except kImmortalFlag. If the masked RefcountAndFlags is 0, we
+ // assume the refcount is equal to 1, since we know it's not immortal and
+ // not greater than 1. If the masked RefcountAndFlags is not 0, we can
+ // assume the refcount is not equal to 1 since either a higher bit in the
+ // refcount is set, or kImmortal is set.
+ kHighRefcountMask = kRefcountMask & ~kRefIncrement,
};
std::atomic<int32_t> count_;
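
The new kHighRefcountMask turns the "last reference and not immortal" test into a single mask-and-compare. A standalone sketch with the flag layout mirrored from this header (the concrete constants are assumed from the enum above and may differ across versions):

#include <cassert>
#include <cstdint>

constexpr int32_t kNumFlags = 2;
constexpr int32_t kImmortalFlag = 0x1;
constexpr int32_t kReservedFlag = 0x2;
constexpr int32_t kRefIncrement = 1 << kNumFlags;  // refcount lives above flags
constexpr int32_t kRefcountMask = ~kReservedFlag;
constexpr int32_t kHighRefcountMask = kRefcountMask & ~kRefIncrement;

int main() {
  int32_t one_ref = kRefIncrement;       // refcount == 1, no flags
  int32_t two_refs = 2 * kRefIncrement;  // refcount == 2
  int32_t immortal = kRefIncrement | kImmortalFlag;
  // Masked value 0 means: exactly one reference and not immortal, so the
  // caller held the last reference and may delete.
  assert((one_ref & kHighRefcountMask) == 0);
  assert((two_refs & kHighRefcountMask) != 0);  // still shared
  assert((immortal & kHighRefcountMask) != 0);  // never reports "last"
  return 0;
}
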
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
index 3f283ed4f1..3aa2267c69 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.cc
@@ -14,6 +14,7 @@
#include "y_absl/strings/internal/cord_rep_btree.h"
+#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
@@ -49,9 +50,7 @@ using CopyResult = CordRepBtree::CopyResult;
constexpr auto kFront = CordRepBtree::kFront;
constexpr auto kBack = CordRepBtree::kBack;
-inline bool exhaustive_validation() {
- return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
-}
+Y_ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
// Implementation of the various 'Dump' functions.
// Prints the entire tree structure or 'rep'. External callers should
@@ -362,6 +361,15 @@ struct StackOperations {
} // namespace
+void SetCordBtreeExhaustiveValidation(bool do_exaustive_validation) {
+ cord_btree_exhaustive_validation.store(do_exaustive_validation,
+ std::memory_order_relaxed);
+}
+
+bool IsCordBtreeExhaustiveValidationEnabled() {
+ return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
+}
+
void CordRepBtree::Dump(const CordRep* rep, y_absl::string_view label,
bool include_contents, std::ostream& stream) {
stream << "===================================\n";
@@ -450,7 +458,8 @@ bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
child_length += edge->length;
}
NODE_CHECK_EQ(child_length, tree->length);
- if ((!shallow || exhaustive_validation()) && tree->height() > 0) {
+ if ((!shallow || IsCordBtreeExhaustiveValidationEnabled()) &&
+ tree->height() > 0) {
for (CordRep* edge : tree->Edges()) {
if (!IsValid(edge->btree(), shallow)) return false;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
index c013bbbdc7..0d5b4ad427 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_btree.h
@@ -32,6 +32,14 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace cord_internal {
+// `SetCordBtreeExhaustiveValidation()` can be called to force exhaustive
+// validation in debug assertions, and code that calls `IsValid()`
+// explicitly. By default, assertions should be relatively cheap and
+// AssertValid() can easily lead to O(n^2) complexity as recursive / full tree
+// validation is O(n).
+void SetCordBtreeExhaustiveValidation(bool do_exhaustive_validation);
+bool IsCordBtreeExhaustiveValidationEnabled();
+
class CordRepBtreeNavigator;
// CordRepBtree is as the name implies a btree implementation of a Cordrep tree.
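
A hedged usage sketch of the new toggles, assuming this internal header from the tree; exhaustive validation is O(n) per check, so it is intended for targeted debug runs rather than production:

#include "y_absl/strings/internal/cord_rep_btree.h"

int main() {
  y_absl::cord_internal::SetCordBtreeExhaustiveValidation(true);
  // ... exercise Cord operations here under full-tree IsValid() checking ...
  return y_absl::cord_internal::IsCordBtreeExhaustiveValidationEnabled() ? 0
                                                                         : 1;
}
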
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
index a72823ece5..e9d01b783f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.cc
@@ -42,7 +42,8 @@ CordRep* ClipSubstring(CordRepSubstring* substring) {
} // namespace
-void Consume(CordRep* rep, ConsumeFn consume_fn) {
+void Consume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn) {
size_t offset = 0;
size_t length = rep->length;
@@ -53,8 +54,9 @@ void Consume(CordRep* rep, ConsumeFn consume_fn) {
consume_fn(rep, offset, length);
}
-void ReverseConsume(CordRep* rep, ConsumeFn consume_fn) {
- return Consume(rep, std::move(consume_fn));
+void ReverseConsume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn) {
+ return Consume(rep, consume_fn);
}
} // namespace cord_internal
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h
index 6e2b887a42..00c8483211 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_consume.h
@@ -24,11 +24,6 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-// Functor for the Consume() and ReverseConsume() functions:
-// void ConsumeFunc(CordRep* rep, size_t offset, size_t length);
-// See the Consume() and ReverseConsume() function comments for documentation.
-using ConsumeFn = FunctionRef<void(CordRep*, size_t, size_t)>;
-
// Consume() and ReverseConsume() consume CONCAT based trees and invoke the
// provided functor with the contained nodes in the proper forward or reverse
// order, which is used to convert CONCAT trees into other tree or cord data.
@@ -40,8 +35,10 @@ using ConsumeFn = FunctionRef<void(CordRep*, size_t, size_t)>;
// violations, we can not 100% guarantee that all code respects 'new format'
// settings and flags, so we need to be able to parse old data on the fly until
// all old code is deprecated / no longer the default format.
-void Consume(CordRep* rep, ConsumeFn consume_fn);
-void ReverseConsume(CordRep* rep, ConsumeFn consume_fn);
+void Consume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn);
+void ReverseConsume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn);
} // namespace cord_internal
Y_ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
index b5ac831903..40e5a1b08c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_flat.h
@@ -120,8 +120,16 @@ struct CordRepFlat : public CordRep {
// Round size up so it matches a size we can exactly express in a tag.
const size_t size = RoundUpForTag(len + kFlatOverhead);
void* const raw_rep = ::operator new(size);
+ // GCC 13 has a false-positive -Wstringop-overflow warning here.
+ #if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wstringop-overflow"
+ #endif
CordRepFlat* rep = new (raw_rep) CordRepFlat();
rep->tag = AllocatedSizeToTag(size);
+ #if Y_ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0)
+ #pragma GCC diagnostic pop
+ #endif
return rep;
}
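
The push/ignore/pop pragmas above are the standard pattern for silencing one
false-positive GCC diagnostic in a narrow scope without weakening the build
globally. A standalone sketch of the same pattern, with the version gate
written in plain preprocessor terms for illustration:

    #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 13
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wstringop-overflow"
    #endif
    // ... the few lines that trip the false positive ...
    #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 13
    #pragma GCC diagnostic pop
    #endif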
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h
index b4bee354d8..c7698e6fd8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cord_rep_ring.h
@@ -430,7 +430,7 @@ class CordRepRing : public CordRep {
// capacity to satisfy `extra` extra nodes, and unref the old `rep` instance.
//
// If a new CordRepRing can not be allocated, or the new capacity would exceed
- // the maxmimum capacity, then the input is consumed only, and an exception is
+ // the maximum capacity, then the input is consumed only, and an exception is
// thrown.
static CordRepRing* Mutable(CordRepRing* rep, size_t extra);
@@ -472,7 +472,7 @@ class CordRepRing : public CordRep {
// Increases the data offset for entry `index` by `n`.
void AddDataOffset(index_type index, size_t n);
- // Descreases the length for entry `index` by `n`.
+ // Decreases the length for entry `index` by `n`.
void SubLength(index_type index, size_t n);
index_type head_;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc
index 56988f42bb..24baed9183 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.cc
@@ -16,34 +16,60 @@
#include <atomic>
#include "y_absl/base/internal/raw_logging.h" // For Y_ABSL_RAW_CHECK
-#include "y_absl/base/internal/spinlock.h"
+#include "y_absl/synchronization/mutex.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-using ::y_absl::base_internal::SpinLockHolder;
+namespace {
-Y_ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(y_absl::kConstInit);
+struct Queue {
+ Queue() = default;
+
+ y_absl::Mutex mutex;
+ std::atomic<CordzHandle*> dq_tail Y_ABSL_GUARDED_BY(mutex){nullptr};
+
+ // Returns true if this delete queue is empty. This method does not acquire
+ // the lock, but does a 'load acquire' observation on the delete queue tail.
+ // It is used inside Delete() to check for the presence of a delete queue
+ // without holding the lock. The assumption is that the caller is in the
+ // state of 'being deleted', and can not be newly discovered by a concurrent
+ // 'being constructed' snapshot instance. Practically, this means that any
+ // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
+ // before / after' semantics and atomic fences.
+ bool IsEmpty() const Y_ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return dq_tail.load(std::memory_order_acquire) == nullptr;
+ }
+};
+
+static Queue* GlobalQueue() {
+ static Queue* global_queue = new Queue;
+ return global_queue;
+}
+
+} // namespace
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
+ Queue* global_queue = GlobalQueue();
if (is_snapshot) {
- SpinLockHolder lock(&queue_->mutex);
- CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire);
+ MutexLock lock(&global_queue->mutex);
+ CordzHandle* dq_tail =
+ global_queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
dq_prev_ = dq_tail;
dq_tail->dq_next_ = this;
}
- queue_->dq_tail.store(this, std::memory_order_release);
+ global_queue->dq_tail.store(this, std::memory_order_release);
}
}
CordzHandle::~CordzHandle() {
- ODRCheck();
+ Queue* global_queue = GlobalQueue();
if (is_snapshot_) {
std::vector<CordzHandle*> to_delete;
{
- SpinLockHolder lock(&queue_->mutex);
+ MutexLock lock(&global_queue->mutex);
CordzHandle* next = dq_next_;
if (dq_prev_ == nullptr) {
// We were head of the queue, delete every CordzHandle until we reach
@@ -59,7 +85,7 @@ CordzHandle::~CordzHandle() {
if (next) {
next->dq_prev_ = dq_prev_;
} else {
- queue_->dq_tail.store(dq_prev_, std::memory_order_release);
+ global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
}
}
for (CordzHandle* handle : to_delete) {
@@ -69,16 +95,15 @@ CordzHandle::~CordzHandle() {
}
bool CordzHandle::SafeToDelete() const {
- return is_snapshot_ || queue_->IsEmpty();
+ return is_snapshot_ || GlobalQueue()->IsEmpty();
}
void CordzHandle::Delete(CordzHandle* handle) {
assert(handle);
if (handle) {
- handle->ODRCheck();
- Queue* const queue = handle->queue_;
+ Queue* const queue = GlobalQueue();
if (!handle->SafeToDelete()) {
- SpinLockHolder lock(&queue->mutex);
+ MutexLock lock(&queue->mutex);
CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
handle->dq_prev_ = dq_tail;
@@ -93,8 +118,9 @@ void CordzHandle::Delete(CordzHandle* handle) {
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
std::vector<const CordzHandle*> handles;
- SpinLockHolder lock(&global_queue_.mutex);
- CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire);
+ Queue* global_queue = GlobalQueue();
+ MutexLock lock(&global_queue->mutex);
+ CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
handles.push_back(p);
}
@@ -103,13 +129,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
const CordzHandle* handle) const {
- ODRCheck();
if (!is_snapshot_) return false;
if (handle == nullptr) return true;
if (handle->is_snapshot_) return false;
bool snapshot_found = false;
- SpinLockHolder lock(&queue_->mutex);
- for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) {
+ Queue* global_queue = GlobalQueue();
+ MutexLock lock(&global_queue->mutex);
+ for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
if (p == handle) return !snapshot_found;
if (p == this) snapshot_found = true;
}
@@ -119,13 +145,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
std::vector<const CordzHandle*>
CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
- ODRCheck();
std::vector<const CordzHandle*> handles;
if (!is_snapshot()) {
return handles;
}
- SpinLockHolder lock(&queue_->mutex);
+ Queue* global_queue = GlobalQueue();
+ MutexLock lock(&global_queue->mutex);
for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
if (!p->is_snapshot()) {
handles.push_back(p);
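
The patch replaces the constexpr-initialized `global_queue_` member with a
leaked function-local static. A standalone sketch of that pattern (the types
here are illustrative): construction happens on first use and is thread-safe
since C++11, and the object is deliberately never destroyed, so it remains
usable even from destructors that run during program shutdown.

    #include <atomic>

    struct Registry {
      std::atomic<int> count{0};
    };

    Registry* GlobalRegistry() {
      static Registry* instance = new Registry;  // leaked on purpose
      return instance;
    }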
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h
index ab5d895e06..29fec32d54 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_handle.h
@@ -20,8 +20,6 @@
#include "y_absl/base/config.h"
#include "y_absl/base/internal/raw_logging.h"
-#include "y_absl/base/internal/spinlock.h"
-#include "y_absl/synchronization/mutex.h"
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
@@ -34,7 +32,7 @@ namespace cord_internal {
// has gained visibility into a CordzInfo object, that CordzInfo object will not
// be deleted prematurely. This allows the profiler to inspect all CordzInfo
// objects that are alive without needing to hold a global lock.
-class CordzHandle {
+class Y_ABSL_DLL CordzHandle {
public:
CordzHandle() : CordzHandle(false) {}
@@ -79,37 +77,6 @@ class CordzHandle {
virtual ~CordzHandle();
private:
- // Global queue data. CordzHandle stores a pointer to the global queue
- // instance to harden against ODR violations.
- struct Queue {
- constexpr explicit Queue(y_absl::ConstInitType)
- : mutex(y_absl::kConstInit,
- y_absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
-
- y_absl::base_internal::SpinLock mutex;
- std::atomic<CordzHandle*> dq_tail Y_ABSL_GUARDED_BY(mutex){nullptr};
-
- // Returns true if this delete queue is empty. This method does not acquire
- // the lock, but does a 'load acquire' observation on the delete queue tail.
- // It is used inside Delete() to check for the presence of a delete queue
- // without holding the lock. The assumption is that the caller is in the
- // state of 'being deleted', and can not be newly discovered by a concurrent
- // 'being constructed' snapshot instance. Practically, this means that any
- // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
- // before / after' semantics and atomic fences.
- bool IsEmpty() const Y_ABSL_NO_THREAD_SAFETY_ANALYSIS {
- return dq_tail.load(std::memory_order_acquire) == nullptr;
- }
- };
-
- void ODRCheck() const {
-#ifndef NDEBUG
- Y_ABSL_RAW_CHECK(queue_ == &global_queue_, "ODR violation in Cord");
-#endif
- }
-
- Y_ABSL_CONST_INIT static Queue global_queue_;
- Queue* const queue_ = &global_queue_;
const bool is_snapshot_;
// dq_prev_ and dq_next_ require the global queue mutex to be held.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
index beabc48dd9..4e39a98f4a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_info.cc
@@ -26,6 +26,7 @@
#include "y_absl/strings/internal/cordz_statistics.h"
#include "y_absl/strings/internal/cordz_update_tracker.h"
#include "y_absl/synchronization/mutex.h"
+#include "y_absl/time/clock.h"
#include "y_absl/types/span.h"
namespace y_absl {
@@ -53,7 +54,7 @@ namespace {
// The top level node is treated specially: we assume the current thread
// (typically called from the CordzHandler) to hold a reference purely to
// perform a safe analysis, and not being part of the application. So we
-// substract 1 from the reference count of the top node to compute the
+// subtract 1 from the reference count of the top node to compute the
// 'application fair share' excluding the reference of the current thread.
//
// An example of fair sharing, and why we multiply reference counts:
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h
index 821c1566b6..7849282837 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/cordz_sample_token.h
@@ -33,11 +33,11 @@ namespace cord_internal {
// ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail
//
// This list tracks that CH1 and CH2 were created after ST1, so the thread
-// holding ST1 might have a referece to CH1, CH2, ST2, and CH3. However, ST2 was
-// created later, so the thread holding the ST2 token cannot have a reference to
-// ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will delete ST1,
-// CH1, and CH2. If instead ST2 is cleaned up first, that thread will only
-// delete ST2.
+// holding ST1 might have a reference to CH1, CH2, ST2, and CH3. However, ST2
+// was created later, so the thread holding the ST2 token cannot have a
+// reference to ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will
+// delete ST1, CH1, and CH2. If instead ST2 is cleaned up first, that thread
+// will only delete ST2.
//
// If ST1 is cleaned up first, the new list will be:
// ST2 <- CH3 <- global_delete_queue_tail
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
index 421838cc85..94d3a21db3 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.cc
@@ -21,9 +21,17 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace strings_internal {
+// The two strings below provide maps from normal 6-bit values to their
+// base64-escaped equivalents.
+// For the inverse case, see kUn(WebSafe)Base64 in the external
+// escaping.cc.
Y_ABSL_CONST_INIT const char kBase64Chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+Y_ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+
size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
// Base64 encodes three bytes of input at a time. If the input is not
// divisible by three, we pad as appropriate.
@@ -62,6 +70,21 @@ size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
return len;
}
+// ----------------------------------------------------------------------
+// Take the input in groups of 4 characters and turn each
+// character into a code 0 to 63 thus:
+// A-Z map to 0 to 25
+// a-z map to 26 to 51
+// 0-9 map to 52 to 61
+// +(- for WebSafe) maps to 62
+// /(_ for WebSafe) maps to 63
+// There will be four numbers, all less than 64, each representable by a
+// 6-digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+// Arrange the 6 digit binary numbers into three bytes as such:
+// aaaaaabb bbbbcccc ccdddddd
+// Equals signs (one or two) are used at the end of the encoded block to
+// indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
size_t szdest, const char* base64,
bool do_padding) {
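
A worked sketch of the bit layout described in the block comment above
(standalone illustration, not the library's implementation): three input
bytes aaaaaabb bbbbcccc ccdddddd are split into four 6-bit indices into the
64-character alphabet.

    #include <cstdint>
    #include <cstdio>

    int main() {
      static const char kAlphabet[] =
          "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
      const unsigned char src[3] = {'M', 'a', 'n'};
      const uint32_t group =
          (uint32_t{src[0]} << 16) | (uint32_t{src[1]} << 8) | src[2];
      const char out[5] = {kAlphabet[(group >> 18) & 0x3F],
                           kAlphabet[(group >> 12) & 0x3F],
                           kAlphabet[(group >> 6) & 0x3F],
                           kAlphabet[group & 0x3F], '\0'};
      std::printf("%s\n", out);  // prints "TWFu"
    }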
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h
index 1b49ef4bc0..df72538b59 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/escaping.h
@@ -24,6 +24,7 @@ Y_ABSL_NAMESPACE_BEGIN
namespace strings_internal {
Y_ABSL_CONST_INIT extern const char kBase64Chars[];
+Y_ABSL_CONST_INIT extern const char kWebSafeBase64Chars[];
// Calculates the length of a Base64 encoding (RFC 4648) of a string of length
// `input_len`, with or without padding per `do_padding`. Note that 'web-safe'
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc
index 7d1ed56a3a..5b30b22ee1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.cc
@@ -16,6 +16,8 @@
#include <cstdlib>
+#include "y_absl/strings/ascii.h"
+
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace strings_internal {
@@ -33,83 +35,6 @@ int memcasecmp(const char* s1, const char* s2, size_t len) {
return 0;
}
-char* memdup(const char* s, size_t slen) {
- void* copy;
- if ((copy = malloc(slen)) == nullptr) return nullptr;
- memcpy(copy, s, slen);
- return reinterpret_cast<char*>(copy);
-}
-
-char* memrchr(const char* s, int c, size_t slen) {
- for (const char* e = s + slen - 1; e >= s; e--) {
- if (*e == c) return const_cast<char*>(e);
- }
- return nullptr;
-}
-
-size_t memspn(const char* s, size_t slen, const char* accept) {
- const char* p = s;
- const char* spanp;
- char c, sc;
-
-cont:
- c = *p++;
- if (slen-- == 0)
- return static_cast<size_t>(p - 1 - s);
- for (spanp = accept; (sc = *spanp++) != '\0';)
- if (sc == c) goto cont;
- return static_cast<size_t>(p - 1 - s);
-}
-
-size_t memcspn(const char* s, size_t slen, const char* reject) {
- const char* p = s;
- const char* spanp;
- char c, sc;
-
- while (slen-- != 0) {
- c = *p++;
- for (spanp = reject; (sc = *spanp++) != '\0';)
- if (sc == c)
- return static_cast<size_t>(p - 1 - s);
- }
- return static_cast<size_t>(p - s);
-}
-
-char* mempbrk(const char* s, size_t slen, const char* accept) {
- const char* scanp;
- int sc;
-
- for (; slen; ++s, --slen) {
- for (scanp = accept; (sc = *scanp++) != '\0';)
- if (sc == *s) return const_cast<char*>(s);
- }
- return nullptr;
-}
-
-// This is significantly faster for case-sensitive matches with very
-// few possible matches. See unit test for benchmarks.
-const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
- size_t neelen) {
- if (0 == neelen) {
- return phaystack; // even if haylen is 0
- }
- if (haylen < neelen) return nullptr;
-
- const char* match;
- const char* hayend = phaystack + haylen - neelen + 1;
- // A static cast is used here to work around the fact that memchr returns
- // a void* on Posix-compliant systems and const void* on Windows.
- while (
- (match = static_cast<const char*>(memchr(
- phaystack, pneedle[0], static_cast<size_t>(hayend - phaystack))))) {
- if (memcmp(match, pneedle, neelen) == 0)
- return match;
- else
- phaystack = match + 1;
- }
- return nullptr;
-}
-
} // namespace strings_internal
Y_ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h
index dfc1a2c293..33c3f22456 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/memutil.h
@@ -14,51 +14,6 @@
// limitations under the License.
//
-// These routines provide mem versions of standard C string routines,
-// such as strpbrk. They function exactly the same as the str versions,
-// so if you wonder what they are, replace the word "mem" by
-// "str" and check out the man page. I could return void*, as the
-// strutil.h mem*() routines tend to do, but I return char* instead
-// since this is by far the most common way these functions are called.
-//
-// The difference between the mem and str versions is the mem version
-// takes a pointer and a length, rather than a '\0'-terminated string.
-// The memcase* routines defined here assume the locale is "C"
-// (they use y_absl::ascii_tolower instead of tolower).
-//
-// These routines are based on the BSD library.
-//
-// Here's a list of routines from string.h, and their mem analogues.
-// Functions in lowercase are defined in string.h; those in UPPERCASE
-// are defined here:
-//
-// strlen --
-// strcat strncat MEMCAT
-// strcpy strncpy memcpy
-// -- memccpy (very cool function, btw)
-// -- memmove
-// -- memset
-// strcmp strncmp memcmp
-// strcasecmp strncasecmp MEMCASECMP
-// strchr memchr
-// strcoll --
-// strxfrm --
-// strdup strndup MEMDUP
-// strrchr MEMRCHR
-// strspn MEMSPN
-// strcspn MEMCSPN
-// strpbrk MEMPBRK
-// strstr MEMSTR MEMMEM
-// (g)strcasestr MEMCASESTR MEMCASEMEM
-// strtok --
-// strprefix MEMPREFIX (strprefix is from strutil.h)
-// strcaseprefix MEMCASEPREFIX (strcaseprefix is from strutil.h)
-// strsuffix MEMSUFFIX (strsuffix is from strutil.h)
-// strcasesuffix MEMCASESUFFIX (strcasesuffix is from strutil.h)
-// -- MEMIS
-// -- MEMCASEIS
-// strcount MEMCOUNT (strcount is from strutil.h)
-
#ifndef Y_ABSL_STRINGS_INTERNAL_MEMUTIL_H_
#define Y_ABSL_STRINGS_INTERNAL_MEMUTIL_H_
@@ -72,74 +27,11 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace strings_internal {
-inline char* memcat(char* dest, size_t destlen, const char* src,
- size_t srclen) {
- return reinterpret_cast<char*>(memcpy(dest + destlen, src, srclen));
-}
-
+// Performs a byte-by-byte comparison of `len` bytes of the strings `s1` and
+// `s2`, ignoring the case of the characters. It returns an integer less than,
+// equal to, or greater than zero if `s1` is found, respectively, to be less
+// than, to match, or be greater than `s2`.
int memcasecmp(const char* s1, const char* s2, size_t len);
-char* memdup(const char* s, size_t slen);
-char* memrchr(const char* s, int c, size_t slen);
-size_t memspn(const char* s, size_t slen, const char* accept);
-size_t memcspn(const char* s, size_t slen, const char* reject);
-char* mempbrk(const char* s, size_t slen, const char* accept);
-
-// This is for internal use only. Don't call this directly
-template <bool case_sensitive>
-const char* int_memmatch(const char* haystack, size_t haylen,
- const char* needle, size_t neelen) {
- if (0 == neelen) {
- return haystack; // even if haylen is 0
- }
- const char* hayend = haystack + haylen;
- const char* needlestart = needle;
- const char* needleend = needlestart + neelen;
-
- for (; haystack < hayend; ++haystack) {
- char hay = case_sensitive
- ? *haystack
- : y_absl::ascii_tolower(static_cast<unsigned char>(*haystack));
- char nee = case_sensitive
- ? *needle
- : y_absl::ascii_tolower(static_cast<unsigned char>(*needle));
- if (hay == nee) {
- if (++needle == needleend) {
- return haystack + 1 - neelen;
- }
- } else if (needle != needlestart) {
- // must back up haystack in case a prefix matched (find "aab" in "aaab")
- haystack -= needle - needlestart; // for loop will advance one more
- needle = needlestart;
- }
- }
- return nullptr;
-}
-
-// These are the guys you can call directly
-inline const char* memstr(const char* phaystack, size_t haylen,
- const char* pneedle) {
- return int_memmatch<true>(phaystack, haylen, pneedle, strlen(pneedle));
-}
-
-inline const char* memcasestr(const char* phaystack, size_t haylen,
- const char* pneedle) {
- return int_memmatch<false>(phaystack, haylen, pneedle, strlen(pneedle));
-}
-
-inline const char* memmem(const char* phaystack, size_t haylen,
- const char* pneedle, size_t needlelen) {
- return int_memmatch<true>(phaystack, haylen, pneedle, needlelen);
-}
-
-inline const char* memcasemem(const char* phaystack, size_t haylen,
- const char* pneedle, size_t needlelen) {
- return int_memmatch<false>(phaystack, haylen, pneedle, needlelen);
-}
-
-// This is significantly faster for case-sensitive matches with very
-// few possible matches. See unit test for benchmarks.
-const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
- size_t neelen);
} // namespace strings_internal
Y_ABSL_NAMESPACE_END
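
A caller sketch for the one helper that remains (assumed usage, not from the
patch); this mirrors how EqualsIgnoreCase() in match.cc uses it:

    #include "y_absl/strings/internal/memutil.h"
    #include "y_absl/strings/string_view.h"

    bool EqualsAsciiIgnoreCase(y_absl::string_view a, y_absl::string_view b) {
      return a.size() == b.size() &&
             y_absl::strings_internal::memcasecmp(a.data(), b.data(),
                                                  a.size()) == 0;
    }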
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h
index f50eef9dee..34b93c4bf6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/stl_type_traits.h
@@ -13,7 +13,7 @@
// limitations under the License.
//
-// Thie file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
+// The file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
// trait metafunction to assist in working with the _GLIBCXX_DEBUG debug
// wrappers of STL containers.
//
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
index 67ecc3af7c..b28eb4cbf9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.cc
@@ -106,7 +106,7 @@ class IntDigits {
char *p = storage_ + sizeof(storage_);
do {
p -= 2;
- numbers_internal::PutTwoDigits(static_cast<size_t>(v % 100), p);
+ numbers_internal::PutTwoDigits(static_cast<uint32_t>(v % 100), p);
v /= 100;
} while (v);
if (p[0] == '0') {
@@ -278,24 +278,6 @@ bool ConvertIntImplInnerSlow(const IntDigits &as_digits,
return true;
}
-template <typename T,
- typename std::enable_if<(std::is_integral<T>::value &&
- std::is_signed<T>::value) ||
- std::is_same<T, int128>::value,
- int>::type = 0>
-constexpr auto ConvertV(T) {
- return FormatConversionCharInternal::d;
-}
-
-template <typename T,
- typename std::enable_if<(std::is_integral<T>::value &&
- std::is_unsigned<T>::value) ||
- std::is_same<T, uint128>::value,
- int>::type = 0>
-constexpr auto ConvertV(T) {
- return FormatConversionCharInternal::u;
-}
-
template <typename T>
bool ConvertFloatArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
if (conv.conversion_char() == FormatConversionCharInternal::v) {
@@ -332,10 +314,6 @@ bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
using U = typename MakeUnsigned<T>::type;
IntDigits as_digits;
- if (conv.conversion_char() == FormatConversionCharInternal::v) {
- conv.set_conversion_char(ConvertV(T{}));
- }
-
// This odd casting is due to a bug in -Wswitch behavior in gcc49 which causes
// it to complain about a switch/case type mismatch, even though both are
// FormatConverionChar. Likely this is because at this point
@@ -361,6 +339,7 @@ bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
case static_cast<uint8_t>(FormatConversionCharInternal::d):
case static_cast<uint8_t>(FormatConversionCharInternal::i):
+ case static_cast<uint8_t>(FormatConversionCharInternal::v):
as_digits.PrintAsDec(v);
break;
@@ -482,18 +461,18 @@ CharConvertResult FormatConvertImpl(char v, const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
-CharConvertResult FormatConvertImpl(signed char v,
- const FormatConversionSpecImpl conv,
- FormatSinkImpl *sink) {
+
+// ==================== Ints ====================
+IntegralConvertResult FormatConvertImpl(signed char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
-CharConvertResult FormatConvertImpl(unsigned char v,
- const FormatConversionSpecImpl conv,
- FormatSinkImpl *sink) {
+IntegralConvertResult FormatConvertImpl(unsigned char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
-
-// ==================== Ints ====================
IntegralConvertResult FormatConvertImpl(short v, // NOLINT
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
index 8ed5827835..5eb9c7fd24 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/arg.h
@@ -280,14 +280,14 @@ FloatingConvertResult FormatConvertImpl(long double v,
// Chars.
CharConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv,
FormatSinkImpl* sink);
-CharConvertResult FormatConvertImpl(signed char v,
- FormatConversionSpecImpl conv,
- FormatSinkImpl* sink);
-CharConvertResult FormatConvertImpl(unsigned char v,
- FormatConversionSpecImpl conv,
- FormatSinkImpl* sink);
// Ints.
+IntegralConvertResult FormatConvertImpl(signed char v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned char v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
IntegralConvertResult FormatConvertImpl(short v, // NOLINT
FormatConversionSpecImpl conv,
FormatSinkImpl* sink);
@@ -443,7 +443,7 @@ class FormatArgImpl {
// For everything else:
// - Decay char* and char arrays into `const char*`
// - Decay any other pointer to `const void*`
- // - Decay all enums to their underlying type.
+ // - Decay all enums to the integral promotion of their underlying type.
// - Decay function pointers to void*.
template <typename T, typename = void>
struct DecayType {
@@ -463,7 +463,7 @@ class FormatArgImpl {
!str_format_internal::HasUserDefinedConvert<T>::value &&
!strings_internal::HasAbslStringify<T>::value &&
std::is_enum<T>::value>::type> {
- using type = typename std::underlying_type<T>::type;
+ using type = decltype(+typename std::underlying_type<T>::type());
};
public:
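
The DecayType change relies on unary plus performing integral promotion. A
standalone sketch of the decltype(+x) idiom in plain C++ (names are
illustrative):

    #include <type_traits>

    enum class Small : unsigned char {};
    using Under = std::underlying_type<Small>::type;  // unsigned char
    using Promoted = decltype(+Under());              // int, after promotion
    static_assert(std::is_same<Under, unsigned char>::value, "");
    static_assert(std::is_same<Promoted, int>::value, "");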
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
index daa756c5f4..ec12caedae 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/bind.h
@@ -21,6 +21,7 @@
#include <util/generic/string.h>
#include "y_absl/base/port.h"
+#include "y_absl/container/inlined_vector.h"
#include "y_absl/strings/internal/str_format/arg.h"
#include "y_absl/strings/internal/str_format/checker.h"
#include "y_absl/strings/internal/str_format/parser.h"
@@ -177,17 +178,7 @@ class Streamable {
public:
Streamable(const UntypedFormatSpecImpl& format,
y_absl::Span<const FormatArgImpl> args)
- : format_(format) {
- if (args.size() <= Y_ABSL_ARRAYSIZE(few_args_)) {
- for (size_t i = 0; i < args.size(); ++i) {
- few_args_[i] = args[i];
- }
- args_ = y_absl::MakeSpan(few_args_, args.size());
- } else {
- many_args_.assign(args.begin(), args.end());
- args_ = many_args_;
- }
- }
+ : format_(format), args_(args.begin(), args.end()) {}
std::ostream& Print(std::ostream& os) const;
@@ -197,12 +188,7 @@ class Streamable {
private:
const UntypedFormatSpecImpl& format_;
- y_absl::Span<const FormatArgImpl> args_;
- // if args_.size() is 4 or less:
- FormatArgImpl few_args_[4] = {FormatArgImpl(0), FormatArgImpl(0),
- FormatArgImpl(0), FormatArgImpl(0)};
- // if args_.size() is more than 4:
- std::vector<FormatArgImpl> many_args_;
+ y_absl::InlinedVector<FormatArgImpl, 4> args_;
};
// for testing
@@ -211,8 +197,7 @@ TString Summarize(UntypedFormatSpecImpl format,
bool BindWithPack(const UnboundConversion* props,
y_absl::Span<const FormatArgImpl> pack, BoundConversion* bound);
-bool FormatUntyped(FormatRawSinkImpl raw_sink,
- UntypedFormatSpecImpl format,
+bool FormatUntyped(FormatRawSinkImpl raw_sink, UntypedFormatSpecImpl format,
y_absl::Span<const FormatArgImpl> args);
TString& AppendPack(TString* out, UntypedFormatSpecImpl format,
@@ -231,7 +216,7 @@ int SnprintF(char* output, size_t size, UntypedFormatSpecImpl format,
template <typename T>
class StreamedWrapper {
public:
- explicit StreamedWrapper(const T& v) : v_(v) { }
+ explicit StreamedWrapper(const T& v) : v_(v) {}
private:
template <typename S>
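
The Streamable change above leans on the small-buffer optimization of
y_absl::InlinedVector, which subsumes the hand-rolled few_args_/many_args_
split. A standalone sketch of the container's behavior:

    #include "y_absl/container/inlined_vector.h"

    void InlinedVectorSketch() {
      y_absl::InlinedVector<int, 4> v = {1, 2, 3};  // inline, no heap alloc
      v.push_back(4);  // still inline: size equals the inline capacity
      v.push_back(5);  // exceeds 4 elements, spills to the heap
    }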
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/constexpr_parser.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/constexpr_parser.h
index d3cc23a00c..17703f5933 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/constexpr_parser.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/constexpr_parser.h
@@ -323,6 +323,7 @@ constexpr const char* ConsumeConversion(const char* pos, const char* const end,
if (Y_ABSL_PREDICT_FALSE(c == 'v')) return nullptr;
if (Y_ABSL_PREDICT_FALSE(!tag.is_conv())) return nullptr;
}
+#undef Y_ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR
assert(CheckFastPathSetting(*conv));
(void)(&CheckFastPathSetting);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
index 01a2063b17..20aa033e65 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/extension.h
@@ -273,7 +273,7 @@ struct FormatConversionSpecImplFriend;
class FormatConversionSpecImpl {
public:
- // Width and precison are not specified, no flags are set.
+ // Width and precision are not specified, no flags are set.
bool is_basic() const { return flags_ == Flags::kBasic; }
bool has_left_flag() const { return FlagsContains(flags_, Flags::kLeft); }
bool has_show_pos_flag() const {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc
index b109e93103..d6364aab92 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_format/float_conversion.cc
@@ -711,12 +711,12 @@ bool IncrementNibble(size_t nibble_index, Int* n) {
constexpr size_t kShift = sizeof(Int) * 8 - 1;
constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4;
Int before = *n >> kShift;
- // Here we essentially want to take the number 1 and move it into the requsted
- // nibble, then add it to *n to effectively increment the nibble. However,
- // ASan will complain if we try to shift the 1 beyond the limits of the Int,
- // i.e., if the nibble_index is out of range. So therefore we check for this
- // and if we are out of range we just add 0 which leaves *n unchanged, which
- // seems like the reasonable thing to do in that case.
+ // Here we essentially want to take the number 1 and move it into the
+ // requested nibble, then add it to *n to effectively increment the nibble.
+ // However, ASan will complain if we try to shift the 1 beyond the limits of
+ // the Int, i.e., if the nibble_index is out of range. So therefore we check
+ // for this and if we are out of range we just add 0 which leaves *n
+ // unchanged, which seems like the reasonable thing to do in that case.
*n += ((nibble_index >= kNumNibbles)
? 0
: (Int{1} << static_cast<int>(nibble_index * 4)));
@@ -937,7 +937,7 @@ void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp,
// =============== Exponent ==================
constexpr size_t kBufSizeForExpDecRepr =
- numbers_internal::kFastToBufferSize // requred for FastIntToBuffer
+ numbers_internal::kFastToBufferSize // required for FastIntToBuffer
+ 1 // 'p' or 'P'
+ 1; // '+' or '-'
char exp_buffer[kBufSizeForExpDecRepr];
@@ -1015,7 +1015,7 @@ struct Buffer {
--end;
}
- char &back() {
+ char &back() const {
assert(begin < end);
return end[-1];
}
@@ -1102,7 +1102,7 @@ void PrintExponent(int exp, char e, Buffer *out) {
template <typename Float, typename Int>
constexpr bool CanFitMantissa() {
return
-#if defined(__clang__) && !defined(__SSE3__)
+#if defined(__clang__) && (__clang_major__ < 9) && !defined(__SSE3__)
// Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
// Casting from long double to uint64_t is miscompiled and drops bits.
(!std::is_same<Float, long double>::value ||
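
A standalone sketch of the guarded-shift idea from the IncrementNibble
comment above: shifting a value by its full bit width (or more) is undefined
behavior, so an out-of-range nibble index must contribute 0 rather than a
shifted 1.

    #include <cstddef>
    #include <cstdint>

    uint64_t NibbleIncrement(uint64_t n, size_t nibble_index) {
      constexpr size_t kNumNibbles = sizeof(uint64_t) * 8 / 4;  // 16 nibbles
      return n + ((nibble_index >= kNumNibbles)
                      ? 0
                      : (uint64_t{1} << (nibble_index * 4)));
    }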
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h
index 1f1d7342ce..19afc28ee5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/internal/str_split_internal.h
@@ -235,6 +235,24 @@ struct SplitterIsConvertibleTo
HasMappedType<C>::value> {
};
+template <typename StringType, typename Container, typename = void>
+struct ShouldUseLifetimeBound : std::false_type {};
+
+template <typename StringType, typename Container>
+struct ShouldUseLifetimeBound<
+ StringType, Container,
+ std::enable_if_t<
+ std::is_same<StringType, TString>::value &&
+ std::is_same<typename Container::value_type, y_absl::string_view>::value>>
+ : std::true_type {};
+
+template <typename StringType, typename First, typename Second>
+using ShouldUseLifetimeBoundForPair = std::integral_constant<
+ bool, std::is_same<StringType, TString>::value &&
+ (std::is_same<First, y_absl::string_view>::value ||
+ std::is_same<Second, y_absl::string_view>::value)>;
+
+
// This class implements the range that is returned by y_absl::StrSplit(). This
// class has templated conversion operators that allow it to be implicitly
// converted to a variety of types that the caller may have specified on the
@@ -281,10 +299,24 @@ class Splitter {
// An implicit conversion operator that is restricted to only those containers
// that the splitter is convertible to.
- template <typename Container,
- typename = typename std::enable_if<
- SplitterIsConvertibleTo<Container>::value>::type>
- operator Container() const { // NOLINT(runtime/explicit)
+ template <
+ typename Container,
+ std::enable_if_t<ShouldUseLifetimeBound<StringType, Container>::value &&
+ SplitterIsConvertibleTo<Container>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Container() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return ConvertToContainer<Container, typename Container::value_type,
+ HasMappedType<Container>::value>()(*this);
+ }
+
+ template <
+ typename Container,
+ std::enable_if_t<!ShouldUseLifetimeBound<StringType, Container>::value &&
+ SplitterIsConvertibleTo<Container>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Container() const {
return ConvertToContainer<Container, typename Container::value_type,
HasMappedType<Container>::value>()(*this);
}
@@ -293,8 +325,27 @@ class Splitter {
// strings returned by the begin() iterator. Either/both of .first and .second
// will be constructed with empty strings if the iterator doesn't have a
// corresponding value.
+ template <typename First, typename Second,
+ std::enable_if_t<
+ ShouldUseLifetimeBoundForPair<StringType, First, Second>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator std::pair<First, Second>() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return ConvertToPair<First, Second>();
+ }
+
+ template <typename First, typename Second,
+ std::enable_if_t<!ShouldUseLifetimeBoundForPair<StringType, First,
+ Second>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator std::pair<First, Second>() const {
+ return ConvertToPair<First, Second>();
+ }
+
+ private:
template <typename First, typename Second>
- operator std::pair<First, Second>() const { // NOLINT(runtime/explicit)
+ std::pair<First, Second> ConvertToPair() const {
y_absl::string_view first, second;
auto it = begin();
if (it != end()) {
@@ -306,7 +357,6 @@ class Splitter {
return {First(first), Second(second)};
}
- private:
// ConvertToContainer is a functor converting a Splitter to the requested
// Container of ValueType. It is specialized below to optimize splitting to
// certain combinations of Container and ValueType.
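
The duplicated conversion operators above exist so that
Y_ABSL_ATTRIBUTE_LIFETIME_BOUND is applied exactly when the converted result
would alias a buffer owned by the splitter. A sketch of the case the
attribute lets the compiler flag (assuming a lifetimebound-aware compiler
such as recent Clang):

    #include <vector>

    #include "y_absl/strings/str_split.h"

    std::vector<y_absl::string_view> Dangling() {
      // The temporary TString is owned by the temporary splitter, which dies
      // at the end of the full expression, so the returned string_views
      // dangle. With the lifetime-bound operator the compiler can warn here.
      return y_absl::StrSplit(TString("a,b,c"), ',');
    }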
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc
index f02fa9465b..02ae2b23f5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.cc
@@ -14,6 +14,12 @@
#include "y_absl/strings/match.h"
+#include <algorithm>
+#include <cstdint>
+
+#include "y_absl/base/internal/endian.h"
+#include "y_absl/numeric/bits.h"
+#include "y_absl/strings/ascii.h"
#include "y_absl/strings/internal/memutil.h"
namespace y_absl {
@@ -27,6 +33,27 @@ bool EqualsIgnoreCase(y_absl::string_view piece1,
// memcasecmp uses y_absl::ascii_tolower().
}
+bool StrContainsIgnoreCase(y_absl::string_view haystack,
+ y_absl::string_view needle) noexcept {
+ while (haystack.size() >= needle.size()) {
+ if (StartsWithIgnoreCase(haystack, needle)) return true;
+ haystack.remove_prefix(1);
+ }
+ return false;
+}
+
+bool StrContainsIgnoreCase(y_absl::string_view haystack,
+ char needle) noexcept {
+ char upper_needle = y_absl::ascii_toupper(static_cast<unsigned char>(needle));
+ char lower_needle = y_absl::ascii_tolower(static_cast<unsigned char>(needle));
+ if (upper_needle == lower_needle) {
+ return StrContains(haystack, needle);
+ } else {
+ const char both_cstr[3] = {lower_needle, upper_needle, '\0'};
+ return haystack.find_first_of(both_cstr) != y_absl::string_view::npos;
+ }
+}
+
bool StartsWithIgnoreCase(y_absl::string_view text,
y_absl::string_view prefix) noexcept {
return (text.size() >= prefix.size()) &&
@@ -39,5 +66,65 @@ bool EndsWithIgnoreCase(y_absl::string_view text,
EqualsIgnoreCase(text.substr(text.size() - suffix.size()), suffix);
}
+y_absl::string_view FindLongestCommonPrefix(y_absl::string_view a,
+ y_absl::string_view b) {
+ const y_absl::string_view::size_type limit = std::min(a.size(), b.size());
+ const char* const pa = a.data();
+ const char* const pb = b.data();
+ y_absl::string_view::size_type count = (unsigned) 0;
+
+ if (Y_ABSL_PREDICT_FALSE(limit < 8)) {
+ while (Y_ABSL_PREDICT_TRUE(count + 2 <= limit)) {
+ uint16_t xor_bytes = y_absl::little_endian::Load16(pa + count) ^
+ y_absl::little_endian::Load16(pb + count);
+ if (Y_ABSL_PREDICT_FALSE(xor_bytes != 0)) {
+ if (Y_ABSL_PREDICT_TRUE((xor_bytes & 0xff) == 0)) ++count;
+ return y_absl::string_view(pa, count);
+ }
+ count += 2;
+ }
+ if (Y_ABSL_PREDICT_TRUE(count != limit)) {
+ if (Y_ABSL_PREDICT_TRUE(pa[count] == pb[count])) ++count;
+ }
+ return y_absl::string_view(pa, count);
+ }
+
+ do {
+ uint64_t xor_bytes = y_absl::little_endian::Load64(pa + count) ^
+ y_absl::little_endian::Load64(pb + count);
+ if (Y_ABSL_PREDICT_FALSE(xor_bytes != 0)) {
+ count += static_cast<uint64_t>(y_absl::countr_zero(xor_bytes) >> 3);
+ return y_absl::string_view(pa, count);
+ }
+ count += 8;
+ } while (Y_ABSL_PREDICT_TRUE(count + 8 < limit));
+
+ count = limit - 8;
+ uint64_t xor_bytes = y_absl::little_endian::Load64(pa + count) ^
+ y_absl::little_endian::Load64(pb + count);
+ if (Y_ABSL_PREDICT_TRUE(xor_bytes != 0)) {
+ count += static_cast<uint64_t>(y_absl::countr_zero(xor_bytes) >> 3);
+ return y_absl::string_view(pa, count);
+ }
+ return y_absl::string_view(pa, limit);
+}
+
+y_absl::string_view FindLongestCommonSuffix(y_absl::string_view a,
+ y_absl::string_view b) {
+ const y_absl::string_view::size_type limit = std::min(a.size(), b.size());
+ if (limit == 0) return y_absl::string_view();
+
+ const char* pa = a.data() + a.size() - 1;
+ const char* pb = b.data() + b.size() - 1;
+ y_absl::string_view::size_type count = (unsigned) 0;
+ while (count < limit && *pa == *pb) {
+ --pa;
+ --pb;
+ ++count;
+ }
+
+ return y_absl::string_view(++pa, count);
+}
+
Y_ABSL_NAMESPACE_END
} // namespace y_absl
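
A worked sketch of the word-at-a-time trick in FindLongestCommonPrefix above
(standalone, using memcpy in place of the internal Load64): XOR two 8-byte
loads, then the count of trailing zero bits divided by 8 is the number of
leading bytes that match on a little-endian target.

    #include <bit>      // std::countr_zero (C++20)
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const char a[8] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};
      const char b[8] = {'a', 'b', 'c', 'x', 'y', 'z', '!', '?'};
      uint64_t wa, wb;
      std::memcpy(&wa, a, 8);
      std::memcpy(&wb, b, 8);
      const uint64_t x = wa ^ wb;  // nonzero once the strings diverge
      const int matching = std::countr_zero(x) >> 3;
      std::printf("%d\n", matching);  // prints 3: "abc" is the common prefix
    }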
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h
index 4564eff97e..9b35acaa8c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/match.h
@@ -72,6 +72,15 @@ inline bool EndsWith(y_absl::string_view text,
memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
suffix.size()) == 0);
}
+// StrContainsIgnoreCase()
+//
+// Returns whether a given ASCII string `haystack` contains the ASCII substring
+// `needle`, ignoring case in the comparison.
+bool StrContainsIgnoreCase(y_absl::string_view haystack,
+ y_absl::string_view needle) noexcept;
+
+bool StrContainsIgnoreCase(y_absl::string_view haystack,
+ char needle) noexcept;
// EqualsIgnoreCase()
//
@@ -94,6 +103,16 @@ bool StartsWithIgnoreCase(y_absl::string_view text,
bool EndsWithIgnoreCase(y_absl::string_view text,
y_absl::string_view suffix) noexcept;
+// Yields the longest prefix in common between both input strings.
+// Pointer-wise, the returned result is a subset of input "a".
+y_absl::string_view FindLongestCommonPrefix(y_absl::string_view a,
+ y_absl::string_view b);
+
+// Yields the longest suffix in common between both input strings.
+// Pointer-wise, the returned result is a subset of input "a".
+y_absl::string_view FindLongestCommonSuffix(y_absl::string_view a,
+ y_absl::string_view b);
+
Y_ABSL_NAMESPACE_END
} // namespace y_absl
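
A short caller sketch for the new public declarations (assumed usage, not
from the patch):

    #include "y_absl/strings/match.h"

    void MatchSketch() {
      bool found = y_absl::StrContainsIgnoreCase("Foo Bar", "bar");  // true
      y_absl::string_view p =
          y_absl::FindLongestCommonPrefix("abcdef", "abcxyz");  // "abc"
      (void)found;
      (void)p;
    }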
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
index af1ecfd6d4..d607026d01 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.cc
@@ -31,7 +31,9 @@
#include <utility>
#include "y_absl/base/attributes.h"
+#include "y_absl/base/internal/endian.h"
#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/optimization.h"
#include "y_absl/numeric/bits.h"
#include "y_absl/strings/ascii.h"
#include "y_absl/strings/charconv.h"
@@ -136,82 +138,132 @@ bool SimpleAtob(y_absl::string_view str, bool* out) {
namespace {
-// Used to optimize printing a decimal number's final digit.
-const char one_ASCII_final_digits[10][2] {
- {'0', 0}, {'1', 0}, {'2', 0}, {'3', 0}, {'4', 0},
- {'5', 0}, {'6', 0}, {'7', 0}, {'8', 0}, {'9', 0},
-};
+// Various routines to encode integers to strings.
+
+// We split data encodings into groups of 2, 4, and 8 digits, as it is
+// easier to combine powers of two in scalar arithmetic.
+
+// The previous implementation used a 200-byte lookup table for every 2
+// digits and was memory bound: any L1 cache miss resulted in a much slower
+// conversion. When benchmarking with a cache eviction rate of several
+// percent, this implementation proved to be better.
+
+// These constants represent '00', '0000' and '00000000' as ASCII strings
+// packed into integers. We can add these numbers to bytes holding values
+// 0 to 9, since 'i' = '0' + i for 0 <= i <= 9.
+constexpr uint32_t kTwoZeroBytes = 0x0101 * '0';
+constexpr uint64_t kFourZeroBytes = 0x01010101 * '0';
+constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0';
+
+// Multiplying by 103 and dividing by 1024 (right-shifting by 10) divides by
+// 10 for all values from 0 to 99. Applied to a packed layout
+// [k takes 2 bytes][m takes 2 bytes], * 103 / 1024 yields [k / 10][m / 10],
+// i.e. both fields are divided in parallel.
+constexpr uint64_t kDivisionBy10Mul = 103u;
+constexpr uint64_t kDivisionBy10Div = 1 << 10;
+
+// * 10486 / 1048576 is a division by 100 for values from 0 to 9999.
+constexpr uint64_t kDivisionBy100Mul = 10486u;
+constexpr uint64_t kDivisionBy100Div = 1 << 20;
+
+// Encode functions write the ASCII output of input `n` to `out_str`.
+inline char* EncodeHundred(uint32_t n, char* out_str) {
+ int num_digits = static_cast<int>(n - 10) >> 8;
+ uint32_t base = kTwoZeroBytes;
+ uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
+ uint32_t mod10 = n - 10u * div10;
+ base += div10 + (mod10 << 8);
+ base >>= num_digits & 8;
+ little_endian::Store16(out_str, static_cast<uint16_t>(base));
+ return out_str + 2 + num_digits;
+}
-} // namespace
+inline char* EncodeTenThousand(uint32_t n, char* out_str) {
+  // We split the lower 2 digits and the upper 2 digits of n into consecutive
+  // 2-byte blocks: 123 -> [\0\1][\0\23]. We divide both blocks by 10 (one
+  // division plus zeroing of upper bits) and compute the modulo 10 "in
+  // parallel" as well. Then we combine both results to get all the ASCII
+  // digits, strip trailing zero bytes, add ASCII '0000' and return.
+ uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div;
+ uint32_t mod100 = n - 100ull * div100;
+ uint32_t hundreds = (mod100 << 16) + div100;
+ uint32_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
+ tens &= (0xFull << 16) | 0xFull;
+ tens += (hundreds - 10ull * tens) << 8;
+ Y_ABSL_ASSUME(tens != 0);
+  // The result can contain trailing zero bits; we need to strip them up to
+  // the first significant byte of the final representation. For example, for
+  // n = 123, `tens` has the representation \0\1\2\3. We use countr_zero to
+  // count the trailing zero bits and `& -8` to round that count down to a
+  // multiple of 8, so that whole zero bytes are stripped, not just zero bits.
+  // (Written as `0 - 8` rather than `-8` to keep MSVC happy.)
+ uint32_t zeroes = static_cast<uint32_t>(y_absl::countr_zero(tens)) & (0 - 8ull);
+ tens += kFourZeroBytes;
+ tens >>= zeroes;
+ little_endian::Store32(out_str, tens);
+ return out_str + sizeof(tens) - zeroes / 8;
+}
-char* numbers_internal::FastIntToBuffer(uint32_t i, char* buffer) {
- uint32_t digits;
- // The idea of this implementation is to trim the number of divides to as few
- // as possible, and also reducing memory stores and branches, by going in
- // steps of two digits at a time rather than one whenever possible.
- // The huge-number case is first, in the hopes that the compiler will output
- // that case in one branch-free block of code, and only output conditional
- // branches into it from below.
- if (i >= 1000000000) { // >= 1,000,000,000
- digits = i / 100000000; // 100,000,000
- i -= digits * 100000000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt100_000_000:
- digits = i / 1000000; // 1,000,000
- i -= digits * 1000000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt1_000_000:
- digits = i / 10000; // 10,000
- i -= digits * 10000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt10_000:
- digits = i / 100;
- i -= digits * 100;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt100:
- digits = i;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- *buffer = 0;
- return buffer;
- }
+// Prepare functions return an integer that should be written to out_str
+// (but may include trailing zeros).
+// For hi < 10000 and lo < 10000, returns the number hi * 10000 + lo encoded
+// in ASCII as a uint64_t, possibly with trailing zeroes.
+inline uint64_t PrepareTenThousands(uint64_t hi, uint64_t lo) {
+ uint64_t merged = hi | (lo << 32);
+ uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) &
+ ((0x7Full << 32) | 0x7Full);
+ uint64_t mod100 = merged - 100ull * div100;
+ uint64_t hundreds = (mod100 << 16) + div100;
+ uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
+ tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull;
+ tens += (hundreds - 10ull * tens) << 8;
+ return tens;
+}
- if (i < 100) {
- digits = i;
- if (i >= 10) goto lt100;
- memcpy(buffer, one_ASCII_final_digits[i], 2);
- return buffer + 1;
+inline char* EncodeFullU32(uint32_t n, char* out_str) {
+ if (n < 100'000'000) {
+ uint64_t bottom = PrepareTenThousands(n / 10000, n % 10000);
+ Y_ABSL_ASSUME(bottom != 0);
+ // 0 minus 8 to make MSVC happy.
+ uint32_t zeroes = static_cast<uint32_t>(y_absl::countr_zero(bottom))
+ & (0 - 8ull);
+ uint64_t bottom_res = bottom + kEightZeroBytes;
+ bottom_res >>= zeroes;
+ little_endian::Store64(out_str, bottom_res);
+ return out_str + sizeof(bottom) - zeroes / 8;
}
- if (i < 10000) { // 10,000
- if (i >= 1000) goto lt10_000;
- digits = i / 100;
- i -= digits * 100;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt100;
- }
- if (i < 1000000) { // 1,000,000
- if (i >= 100000) goto lt1_000_000;
- digits = i / 10000; // 10,000
- i -= digits * 10000;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt10_000;
+ uint32_t top = n / 100'000'000;
+ n %= 100'000'000;
+ uint64_t bottom = PrepareTenThousands(n / 10000, n % 10000);
+ uint64_t bottom_res = bottom + kEightZeroBytes;
+ out_str = EncodeHundred(top, out_str);
+ little_endian::Store64(out_str, bottom_res);
+ return out_str + sizeof(bottom);
+}
+
+} // namespace
+
+void numbers_internal::PutTwoDigits(uint32_t i, char* buf) {
+ assert(i < 100);
+ uint32_t base = kTwoZeroBytes;
+ uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
+ uint32_t mod10 = i - 10u * div10;
+ base += div10 + (mod10 << 8);
+ little_endian::Store16(buf, static_cast<uint16_t>(base));
+}
+
+char* numbers_internal::FastIntToBuffer(uint32_t n, char* out_str) {
+ if (n < 100) {
+ out_str = EncodeHundred(n, out_str);
+ goto set_last_zero;
}
- if (i < 100000000) { // 100,000,000
- if (i >= 10000000) goto lt100_000_000;
- digits = i / 1000000; // 1,000,000
- i -= digits * 1000000;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt1_000_000;
+ if (n < 10000) {
+ out_str = EncodeTenThousand(n, out_str);
+ goto set_last_zero;
}
- // we already know that i < 1,000,000,000
- digits = i / 100000000; // 100,000,000
- i -= digits * 100000000;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt100_000_000;
+ out_str = EncodeFullU32(n, out_str);
+set_last_zero:
+ *out_str = '\0';
+ return out_str;
}
char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
@@ -219,7 +271,7 @@ char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
if (i < 0) {
*buffer++ = '-';
// We need to do the negation in modular (i.e., "unsigned")
- // arithmetic; MSVC++ apprently warns for plain "-u", so
+ // arithmetic; MSVC++ apparently warns for plain "-u", so
// we write the equivalent expression "0 - u" instead.
u = 0 - u;
}
@@ -230,41 +282,40 @@ char* numbers_internal::FastIntToBuffer(uint64_t i, char* buffer) {
uint32_t u32 = static_cast<uint32_t>(i);
if (u32 == i) return numbers_internal::FastIntToBuffer(u32, buffer);
- // Here we know i has at least 10 decimal digits.
- uint64_t top_1to11 = i / 1000000000;
- u32 = static_cast<uint32_t>(i - top_1to11 * 1000000000);
- uint32_t top_1to11_32 = static_cast<uint32_t>(top_1to11);
+  // Here 2**32 <= i, so i has at least 10 decimal digits; if i < 10**10 we
+  // can encode it as 2 + 8 digits.
+ uint64_t div08 = i / 100'000'000ull;
+ uint64_t mod08 = i % 100'000'000ull;
+ uint64_t mod_result =
+ PrepareTenThousands(mod08 / 10000, mod08 % 10000) + kEightZeroBytes;
+ if (i < 10'000'000'000ull) {
+ buffer = EncodeHundred(static_cast<uint32_t>(div08), buffer);
+ little_endian::Store64(buffer, mod_result);
+ buffer += 8;
+ goto set_last_zero;
+ }
- if (top_1to11_32 == top_1to11) {
- buffer = numbers_internal::FastIntToBuffer(top_1to11_32, buffer);
+ // i < 10**16, in this case 8+8
+ if (i < 10'000'000'000'000'000ull) {
+ buffer = EncodeFullU32(static_cast<uint32_t>(div08), buffer);
+ little_endian::Store64(buffer, mod_result);
+ buffer += 8;
+ goto set_last_zero;
} else {
- // top_1to11 has more than 32 bits too; print it in two steps.
- uint32_t top_8to9 = static_cast<uint32_t>(top_1to11 / 100);
- uint32_t mid_2 = static_cast<uint32_t>(top_1to11 - top_8to9 * 100);
- buffer = numbers_internal::FastIntToBuffer(top_8to9, buffer);
- PutTwoDigits(mid_2, buffer);
- buffer += 2;
+ // 4 + 8 + 8
+ uint64_t div016 = i / 10'000'000'000'000'000ull;
+ buffer = EncodeTenThousand(static_cast<uint32_t>(div016), buffer);
+ uint64_t mid_result = div08 - div016 * 100'000'000ull;
+ mid_result = PrepareTenThousands(mid_result / 10000, mid_result % 10000) +
+ kEightZeroBytes;
+ little_endian::Store64(buffer, mid_result);
+ buffer += 8;
+ little_endian::Store64(buffer, mod_result);
+ buffer += 8;
+ goto set_last_zero;
}
-
- // We have only 9 digits now, again the maximum uint32_t can handle fully.
- uint32_t digits = u32 / 10000000; // 10,000,000
- u32 -= digits * 10000000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- digits = u32 / 100000; // 100,000
- u32 -= digits * 100000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- digits = u32 / 1000; // 1,000
- u32 -= digits * 1000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- digits = u32 / 10;
- u32 -= digits * 10;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- memcpy(buffer, one_ASCII_final_digits[u32], 2);
- return buffer + 1;
+set_last_zero:
+ *buffer = '\0';
+ return buffer;
}
char* numbers_internal::FastIntToBuffer(int64_t i, char* buffer) {
@@ -1048,25 +1099,6 @@ Y_ABSL_CONST_INIT Y_ABSL_DLL const char kHexTable[513] =
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
-Y_ABSL_CONST_INIT Y_ABSL_DLL const char two_ASCII_digits[100][2] = {
- {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'},
- {'0', '6'}, {'0', '7'}, {'0', '8'}, {'0', '9'}, {'1', '0'}, {'1', '1'},
- {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'},
- {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'},
- {'2', '4'}, {'2', '5'}, {'2', '6'}, {'2', '7'}, {'2', '8'}, {'2', '9'},
- {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'},
- {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'},
- {'4', '2'}, {'4', '3'}, {'4', '4'}, {'4', '5'}, {'4', '6'}, {'4', '7'},
- {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'},
- {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'},
- {'6', '0'}, {'6', '1'}, {'6', '2'}, {'6', '3'}, {'6', '4'}, {'6', '5'},
- {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'},
- {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'},
- {'7', '8'}, {'7', '9'}, {'8', '0'}, {'8', '1'}, {'8', '2'}, {'8', '3'},
- {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'},
- {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'},
- {'9', '6'}, {'9', '7'}, {'9', '8'}, {'9', '9'}};
-
bool safe_strto32_base(y_absl::string_view text, int32_t* value, int base) {
return safe_int_internal<int32_t>(text, value, base);
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
index b16ac44d0c..c0f1a75642 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/numbers.h
@@ -125,8 +125,6 @@ namespace numbers_internal {
Y_ABSL_DLL extern const char kHexChar[17]; // 0123456789abcdef
Y_ABSL_DLL extern const char
kHexTable[513]; // 000102030405060708090a0b0c0d0e0f1011...
-Y_ABSL_DLL extern const char
- two_ASCII_digits[100][2]; // 00, 01, 02, 03...
// Writes a two-character representation of 'i' to 'buf'. 'i' must be in the
// range 0 <= i < 100, and buf must have space for two characters. Example:
@@ -134,10 +132,7 @@ Y_ABSL_DLL extern const char
// PutTwoDigits(42, buf);
// // buf[0] == '4'
// // buf[1] == '2'
-inline void PutTwoDigits(size_t i, char* buf) {
- assert(i < 100);
- memcpy(buf, two_ASCII_digits[i], 2);
-}
+void PutTwoDigits(uint32_t i, char* buf);
// safe_strto?() functions for implementing SimpleAtoi()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc
index 29c3361c30..36ebfbeb1f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.cc
@@ -30,55 +30,6 @@
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
-AlphaNum::AlphaNum(Hex hex) {
- static_assert(numbers_internal::kFastToBufferSize >= 32,
- "This function only works when output buffer >= 32 bytes long");
- char* const end = &digits_[numbers_internal::kFastToBufferSize];
- auto real_width =
- y_absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
- if (real_width >= hex.width) {
- piece_ = y_absl::string_view(end - real_width, real_width);
- } else {
- // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
- // max pad width can be up to 20.
- std::memset(end - 32, hex.fill, 16);
- // Patch up everything else up to the real_width.
- std::memset(end - real_width - 16, hex.fill, 16);
- piece_ = y_absl::string_view(end - hex.width, hex.width);
- }
-}
-
-AlphaNum::AlphaNum(Dec dec) {
- assert(dec.width <= numbers_internal::kFastToBufferSize);
- char* const end = &digits_[numbers_internal::kFastToBufferSize];
- char* const minfill = end - dec.width;
- char* writer = end;
- uint64_t value = dec.value;
- bool neg = dec.neg;
- while (value > 9) {
- *--writer = '0' + (value % 10);
- value /= 10;
- }
- *--writer = '0' + static_cast<char>(value);
- if (neg) *--writer = '-';
-
- ptrdiff_t fillers = writer - minfill;
- if (fillers > 0) {
- // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
- // But...: if the fill character is '0', then it's <+/-><fill><digits>
- bool add_sign_again = false;
- if (neg && dec.fill == '0') { // If filling with '0',
- ++writer; // ignore the sign we just added
- add_sign_again = true; // and re-add the sign later.
- }
- writer -= fillers;
- std::fill_n(writer, fillers, dec.fill);
- if (add_sign_again) *--writer = '-';
- }
-
- piece_ = y_absl::string_view(writer, static_cast<size_t>(end - writer));
-}
-
// ----------------------------------------------------------------------
// StrCat()
// This merges the given strings or integers, with no delimiter. This
@@ -195,7 +146,13 @@ void AppendPieces(TString* dest,
void StrAppend(TString* dest, const AlphaNum& a) {
ASSERT_NO_OVERLAP(*dest, a);
- dest->append(a.data(), a.size());
+ TString::size_type old_size = dest->size();
+ strings_internal::STLStringResizeUninitializedAmortized(dest,
+ old_size + a.size());
+ char* const begin = &(*dest)[0];
+ char* out = begin + old_size;
+ out = Append(out, a);
+ assert(out == begin + dest->size());
}
void StrAppend(TString* dest, const AlphaNum& a, const AlphaNum& b) {
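Editor's note: the new single-argument StrAppend avoids TString::append's extra pass by growing the destination once with uninitialized storage and writing the piece in place. STLStringResizeUninitializedAmortized and Append are helpers from elsewhere in this patch; a minimal sketch of the same resize-then-write pattern using plain std::string:

#include <cstring>
#include <string>

void AppendBytes(std::string* dest, const char* src, size_t n) {
  size_t old_size = dest->size();
  dest->resize(old_size + n);  // one growth step, then write in place
  std::memcpy(&(*dest)[old_size], src, n);
}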
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
index f64d522149..37de3bf9cd 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_cat.h
@@ -87,13 +87,16 @@
#ifndef Y_ABSL_STRINGS_STR_CAT_H_
#define Y_ABSL_STRINGS_STR_CAT_H_
+#include <algorithm>
#include <array>
#include <cstdint>
+#include <cstring>
#include <util/generic/string.h>
#include <type_traits>
#include <utility>
#include <vector>
+#include "y_absl/base/attributes.h"
#include "y_absl/base/port.h"
#include "y_absl/strings/internal/has_absl_stringify.h"
#include "y_absl/strings/internal/stringify_sink.h"
@@ -201,6 +204,27 @@ struct Hex {
explicit Hex(Pointee* v, PadSpec spec = y_absl::kNoPad)
: Hex(spec, reinterpret_cast<uintptr_t>(v)) {}
+ template <typename S>
+ friend void AbslStringify(S& sink, Hex hex) {
+ static_assert(
+ numbers_internal::kFastToBufferSize >= 32,
+ "This function only works when output buffer >= 32 bytes long");
+ char buffer[numbers_internal::kFastToBufferSize];
+ char* const end = &buffer[numbers_internal::kFastToBufferSize];
+ auto real_width =
+ y_absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
+ if (real_width >= hex.width) {
+ sink.Append(y_absl::string_view(end - real_width, real_width));
+ } else {
+ // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
+ // max pad width can be up to 20.
+ std::memset(end - 32, hex.fill, 16);
+ // Patch up everything else up to the real_width.
+ std::memset(end - real_width - 16, hex.fill, 16);
+ sink.Append(y_absl::string_view(end - hex.width, hex.width));
+ }
+ }
+
private:
Hex(PadSpec spec, uint64_t v)
: value(v),
@@ -235,6 +259,38 @@ struct Dec {
: spec - y_absl::kZeroPad2 + 2),
fill(spec >= y_absl::kSpacePad2 ? ' ' : '0'),
neg(v < 0) {}
+
+ template <typename S>
+ friend void AbslStringify(S& sink, Dec dec) {
+ assert(dec.width <= numbers_internal::kFastToBufferSize);
+ char buffer[numbers_internal::kFastToBufferSize];
+ char* const end = &buffer[numbers_internal::kFastToBufferSize];
+ char* const minfill = end - dec.width;
+ char* writer = end;
+ uint64_t val = dec.value;
+ while (val > 9) {
+ *--writer = '0' + (val % 10);
+ val /= 10;
+ }
+ *--writer = '0' + static_cast<char>(val);
+ if (dec.neg) *--writer = '-';
+
+ ptrdiff_t fillers = writer - minfill;
+ if (fillers > 0) {
+ // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
+ // But...: if the fill character is '0', then it's <+/-><fill><digits>
+ bool add_sign_again = false;
+ if (dec.neg && dec.fill == '0') { // If filling with '0',
+ ++writer; // ignore the sign we just added
+ add_sign_again = true; // and re-add the sign later.
+ }
+ writer -= fillers;
+ std::fill_n(writer, fillers, dec.fill);
+ if (add_sign_again) *--writer = '-';
+ }
+
+ sink.Append(y_absl::string_view(writer, static_cast<size_t>(end - writer)));
+ }
};
// -----------------------------------------------------------------------------
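Editor's note: with formatting moved into AbslStringify friends, Hex and Dec now flow through the generic stringify path instead of dedicated AlphaNum constructors, but call sites are unchanged. Illustrative usage (values chosen for this note, not taken from the patch):

TString h = y_absl::StrCat("0x", y_absl::Hex(255, y_absl::kZeroPad8));  // "0x000000ff"
TString d = y_absl::StrCat(y_absl::Dec(-42, y_absl::kZeroPad6));        // "-00042"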
@@ -282,28 +338,30 @@ class AlphaNum {
AlphaNum(double f) // NOLINT(runtime/explicit)
: piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
- AlphaNum(Hex hex); // NOLINT(runtime/explicit)
- AlphaNum(Dec dec); // NOLINT(runtime/explicit)
-
template <size_t size>
AlphaNum( // NOLINT(runtime/explicit)
- const strings_internal::AlphaNumBuffer<size>& buf)
+ const strings_internal::AlphaNumBuffer<size>& buf
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(&buf.data[0], buf.size) {}
- AlphaNum(const char* c_str) // NOLINT(runtime/explicit)
- : piece_(NullSafeStringView(c_str)) {} // NOLINT(runtime/explicit)
- AlphaNum(y_absl::string_view pc) : piece_(pc) {} // NOLINT(runtime/explicit)
+ AlphaNum(const char* c_str // NOLINT(runtime/explicit)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : piece_(NullSafeStringView(c_str)) {}
+ AlphaNum(y_absl::string_view pc // NOLINT(runtime/explicit)
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : piece_(pc) {}
template <typename T, typename = typename std::enable_if<
strings_internal::HasAbslStringify<T>::value>::type>
- AlphaNum( // NOLINT(runtime/explicit)
- const T& v, // NOLINT(runtime/explicit)
- strings_internal::StringifySink&& sink = {}) // NOLINT(runtime/explicit)
+ AlphaNum( // NOLINT(runtime/explicit)
+ const T& v Y_ABSL_ATTRIBUTE_LIFETIME_BOUND,
+ strings_internal::StringifySink&& sink Y_ABSL_ATTRIBUTE_LIFETIME_BOUND = {})
: piece_(strings_internal::ExtractStringification(sink, v)) {}
template <typename Allocator>
AlphaNum( // NOLINT(runtime/explicit)
- const std::basic_string<char, std::char_traits<char>, Allocator>& str)
+ const std::basic_string<char, std::char_traits<char>, Allocator>& str
+ Y_ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(str) {}
AlphaNum(const TString& str)
@@ -319,14 +377,24 @@ class AlphaNum {
const char* data() const { return piece_.data(); }
y_absl::string_view Piece() const { return piece_; }
- // Normal enums are already handled by the integer formatters.
- // This overload matches only scoped enums.
+ // Match unscoped enums. Use integral promotion so that a `char`-backed
+ // enum becomes a wider integral type AlphaNum will accept.
template <typename T,
typename = typename std::enable_if<
- std::is_enum<T>{} && !std::is_convertible<T, int>{} &&
+ std::is_enum<T>{} && std::is_convertible<T, int>{} &&
!strings_internal::HasAbslStringify<T>::value>::type>
AlphaNum(T e) // NOLINT(runtime/explicit)
- : AlphaNum(static_cast<typename std::underlying_type<T>::type>(e)) {}
+ : AlphaNum(+e) {}
+
+ // This overload matches scoped enums. We must explicitly cast to the
+ // underlying type, but use integral promotion for the same reason as above.
+ template <typename T,
+ typename std::enable_if<
+ std::is_enum<T>{} && !std::is_convertible<T, int>{} &&
+ !strings_internal::HasAbslStringify<T>::value,
+ char*>::type = nullptr>
+ AlphaNum(T e) // NOLINT(runtime/explicit)
+ : AlphaNum(+static_cast<typename std::underlying_type<T>::type>(e)) {}
// vector<bool>::reference and const_reference require special help to
// convert to `AlphaNum` because it requires two user defined conversions.
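Editor's note: the enum overloads now apply unary plus to force integral promotion, so enums with a character underlying type stringify as numbers instead of being routed to the char constructor. A hypothetical example of the effect (enum names invented for this note):

enum CharBacked : char { kA = 65 };    // unscoped, char underlying type
enum class Scoped : char { kB = 66 };  // scoped, char underlying type
// y_absl::StrCat(kA)         -> "65" (promoted via +e, not the char 'A')
// y_absl::StrCat(Scoped::kB) -> "66" (via +static_cast<underlying_type>(e))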
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h
index 521581978d..ccf0a48c1c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_format.h
@@ -36,10 +36,12 @@
// * `y_absl::StreamFormat()` to more efficiently write a format string to a
// stream, such as`std::cout`.
// * `y_absl::PrintF()`, `y_absl::FPrintF()` and `y_absl::SNPrintF()` as
-// replacements for `std::printf()`, `std::fprintf()` and `std::snprintf()`.
+// drop-in replacements for `std::printf()`, `std::fprintf()` and
+// `std::snprintf()`.
//
-// Note: a version of `std::sprintf()` is not supported as it is
-// generally unsafe due to buffer overflows.
+// Note: A `y_absl::SPrintF()` drop-in replacement is not supported as it
+// is generally unsafe due to buffer overflows. Use `y_absl::StrFormat` which
+// returns the string as output instead of expecting a pre-allocated buffer.
//
// Additionally, you can provide a format string (and its associated arguments)
// using one of the following abstractions:
@@ -257,6 +259,7 @@ class FormatCountCapture {
// * Characters: `char`, `signed char`, `unsigned char`
// * Integers: `int`, `short`, `unsigned short`, `unsigned`, `long`,
// `unsigned long`, `long long`, `unsigned long long`
+// * Enums: printed as their underlying integral value
// * Floating-point: `float`, `double`, `long double`
//
// However, in the `str_format` library, a format conversion specifies a broader
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc
index 1c52a152f8..02369f4719 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/str_split.cc
@@ -60,19 +60,23 @@ y_absl::string_view GenericFind(y_absl::string_view text,
// Finds using y_absl::string_view::find(), therefore the length of the found
// delimiter is delimiter.length().
struct LiteralPolicy {
- size_t Find(y_absl::string_view text, y_absl::string_view delimiter, size_t pos) {
+ static size_t Find(y_absl::string_view text, y_absl::string_view delimiter,
+ size_t pos) {
return text.find(delimiter, pos);
}
- size_t Length(y_absl::string_view delimiter) { return delimiter.length(); }
+ static size_t Length(y_absl::string_view delimiter) {
+ return delimiter.length();
+ }
};
// Finds using y_absl::string_view::find_first_of(), therefore the length of the
// found delimiter is 1.
struct AnyOfPolicy {
- size_t Find(y_absl::string_view text, y_absl::string_view delimiter, size_t pos) {
+ static size_t Find(y_absl::string_view text, y_absl::string_view delimiter,
+ size_t pos) {
return text.find_first_of(delimiter, pos);
}
- size_t Length(y_absl::string_view /* delimiter */) { return 1; }
+ static size_t Length(y_absl::string_view /* delimiter */) { return 1; }
};
} // namespace
@@ -123,8 +127,7 @@ ByLength::ByLength(ptrdiff_t length) : length_(length) {
Y_ABSL_RAW_CHECK(length > 0, "");
}
-y_absl::string_view ByLength::Find(y_absl::string_view text,
- size_t pos) const {
+y_absl::string_view ByLength::Find(y_absl::string_view text, size_t pos) const {
pos = std::min(pos, text.size()); // truncate `pos`
y_absl::string_view substr = text.substr(pos);
// If the string is shorter than the chunk size we say we
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
index fc31726ed4..93c50f0d0c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.cc
@@ -21,12 +21,35 @@
#include <cstring>
#include <ostream>
-#include "y_absl/strings/internal/memutil.h"
-
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace {
+
+// This is significantly faster for case-sensitive matches with very
+// few possible matches.
+const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
+ size_t neelen) {
+ if (0 == neelen) {
+ return phaystack; // even if haylen is 0
+ }
+ if (haylen < neelen) return nullptr;
+
+ const char* match;
+ const char* hayend = phaystack + haylen - neelen + 1;
+ // A static cast is used here to work around the fact that memchr returns
+ // a void* on Posix-compliant systems and const void* on Windows.
+ while (
+ (match = static_cast<const char*>(memchr(
+ phaystack, pneedle[0], static_cast<size_t>(hayend - phaystack))))) {
+ if (memcmp(match, pneedle, neelen) == 0)
+ return match;
+ else
+ phaystack = match + 1;
+ }
+ return nullptr;
+}
+
void WritePadding(std::ostream& o, size_t pad) {
char fill_buf[32];
memset(fill_buf, o.fill(), sizeof(fill_buf));
@@ -84,8 +107,7 @@ string_view::size_type string_view::find(string_view s,
if (empty() && pos == 0 && s.empty()) return 0;
return npos;
}
- const char* result =
- strings_internal::memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
+ const char* result = memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
return result ? static_cast<size_type>(result - ptr_) : npos;
}
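Editor's note: string_view::find now relies on a file-local memmatch rather than the strings_internal one. The strategy is to let memchr (typically vectorized in libc) skip to each candidate first byte and then confirm the full needle with memcmp, which is fast when few positions match. Behavior is unchanged; for example:

y_absl::string_view hay = "abcabcabd";
size_t pos = hay.find("abd");  // memchr visits each 'a', memcmp confirms; pos == 6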
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
index 4d7b8b14bf..8b90baebc7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/strings/string_view.h
@@ -55,8 +55,6 @@ Y_ABSL_NAMESPACE_END
#else // Y_ABSL_USES_STD_STRING_VIEW
-#error "std::string_view should be used in all configurations"
-
#if Y_ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
(defined(__GNUC__) && !defined(__clang__)) || \
(defined(_MSC_VER) && _MSC_VER >= 1928)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.darwin-x86_64.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.darwin-x86_64.txt
index 7901d8cf24..d0a0e1930d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.darwin-x86_64.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.darwin-x86_64.txt
@@ -27,9 +27,15 @@ target_sources(abseil-cpp-tstring-y_absl-synchronization PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
- ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc
)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-aarch64.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-aarch64.txt
index f2e0472a5d..0198f69458 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-aarch64.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-aarch64.txt
@@ -28,9 +28,15 @@ target_sources(abseil-cpp-tstring-y_absl-synchronization PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
- ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc
)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-x86_64.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-x86_64.txt
index f2e0472a5d..0198f69458 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-x86_64.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.linux-x86_64.txt
@@ -28,9 +28,15 @@ target_sources(abseil-cpp-tstring-y_absl-synchronization PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
- ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc
)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.windows-x86_64.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.windows-x86_64.txt
index 7901d8cf24..d0a0e1930d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.windows-x86_64.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/CMakeLists.windows-x86_64.txt
@@ -27,9 +27,15 @@ target_sources(abseil-cpp-tstring-y_absl-synchronization PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/barrier.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/blocking_counter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
- ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
${CMAKE_SOURCE_DIR}/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/notification.cc
)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
index 95787d5605..6172692866 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
@@ -13,10 +13,12 @@
// limitations under the License.
#include <stdint.h>
+
#include <new>
// This file is a no-op if the required LowLevelAlloc support is missing.
#include "y_absl/base/internal/low_level_alloc.h"
+#include "y_absl/synchronization/internal/waiter.h"
#ifndef Y_ABSL_LOW_LEVEL_ALLOC_MISSING
#include <string.h>
@@ -71,6 +73,9 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) {
void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
PerThreadSem::Init(identity);
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
}
static void ResetThreadIdentityBetweenReuse(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
index 890bc10015..d837467e67 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
@@ -16,9 +16,7 @@
#include "y_absl/base/config.h"
-#ifdef _WIN32
-#include <windows.h>
-#else
+#ifndef _WIN32
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -34,6 +32,7 @@
#include <atomic>
#include <cstdint>
+#include <limits>
#include "y_absl/base/optimization.h"
#include "y_absl/synchronization/internal/kernel_timeout.h"
@@ -81,51 +80,64 @@ namespace synchronization_internal {
#if defined(SYS_futex_time64) && !defined(SYS_futex)
#define SYS_futex SYS_futex_time64
+using FutexTimespec = struct timespec;
+#else
+// Some libc implementations have switched to an unconditional 64-bit `time_t`
+// definition. This means that `struct timespec` may not match the layout
+// expected by the kernel ABI on 32-bit platforms. So we define the
+// FutexTimespec that matches the kernel timespec definition. It should be safe
+// to use this struct for 64-bit userspace builds too, since it will use another
+// SYS_futex kernel call with 64-bit tv_sec inside timespec.
+struct FutexTimespec {
+ long tv_sec; // NOLINT
+ long tv_nsec; // NOLINT
+};
#endif
class FutexImpl {
public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
- KernelTimeout t) {
- long err = 0; // NOLINT(runtime/int)
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
- }
- if (Y_ABSL_PREDICT_FALSE(err != 0)) {
+  // Atomically check that `*v == val`, and if it is, then sleep until
+  // woken by `Wake()`.
+ static int Wait(std::atomic<int32_t>* v, int32_t val) {
+ return WaitAbsoluteTimeout(v, val, nullptr);
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`.
+ static int WaitAbsoluteTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* abs_timeout) {
+ FutexTimespec ts;
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ auto err = syscall(
+ SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ ToFutexTimespec(abs_timeout, &ts), nullptr, FUTEX_BITSET_MATCH_ANY);
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
- int32_t bits,
- const struct timespec *abstime) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
- nullptr, bits);
- if (Y_ABSL_PREDICT_FALSE(err != 0)) {
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // `*rel_timeout` has elapsed, or until woken by `Wake()`.
+ static int WaitRelativeTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* rel_timeout) {
+ FutexTimespec ts;
+    // Atomically check that the futex value is still `val`, and if it
+    // is, sleep until rel_timeout has elapsed or until woken by FUTEX_WAKE.
+ auto err =
+ syscall(SYS_futex, reinterpret_cast<int32_t*>(v), FUTEX_PRIVATE_FLAG,
+ val, ToFutexTimespec(rel_timeout, &ts));
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ // Wakes at most `count` waiters that have entered the sleep state on `v`.
+ static int Wake(std::atomic<int32_t>* v, int32_t count) {
+ auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (Y_ABSL_PREDICT_FALSE(err < 0)) {
return -errno;
@@ -133,16 +145,24 @@ class FutexImpl {
return 0;
}
- // FUTEX_WAKE_BITSET
- static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
- FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
- nullptr, bits);
- if (Y_ABSL_PREDICT_FALSE(err < 0)) {
- return -errno;
+ private:
+ static FutexTimespec* ToFutexTimespec(const struct timespec* userspace_ts,
+ FutexTimespec* futex_ts) {
+ if (userspace_ts == nullptr) {
+ return nullptr;
}
- return 0;
+
+ using FutexSeconds = decltype(futex_ts->tv_sec);
+ using FutexNanoseconds = decltype(futex_ts->tv_nsec);
+
+ constexpr auto kMaxSeconds{(std::numeric_limits<FutexSeconds>::max)()};
+ if (userspace_ts->tv_sec > kMaxSeconds) {
+ futex_ts->tv_sec = kMaxSeconds;
+ } else {
+ futex_ts->tv_sec = static_cast<FutexSeconds>(userspace_ts->tv_sec);
+ }
+ futex_ts->tv_nsec = static_cast<FutexNanoseconds>(userspace_ts->tv_nsec);
+ return futex_ts;
}
};
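Editor's note: the FutexTimespec shim exists because a libc may define struct timespec with a 64-bit tv_sec even on 32-bit platforms, while the SYS_futex entry point still expects the kernel's long-based layout. ToFutexTimespec therefore copies field by field and saturates seconds that do not fit. The clamping rule in isolation looks roughly like this (a sketch, not the library code):

#include <ctime>
#include <limits>

struct KernelTs { long tv_sec; long tv_nsec; };  // kernel ABI layout

KernelTs ClampToKernel(const struct timespec& ts) {
  KernelTs out;
  constexpr long kMax = std::numeric_limits<long>::max();
  out.tv_sec = ts.tv_sec > kMax ? kMax : static_cast<long>(ts.tv_sec);
  out.tv_nsec = static_cast<long>(ts.tv_nsec);
  return out;  // saturation only matters for timeouts decades away
}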
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc
new file mode 100644
index 0000000000..2ca374b712
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.cc
@@ -0,0 +1,111 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/futex_waiter.h"
+
+#ifdef Y_ABSL_INTERNAL_HAVE_FUTEX_WAITER
+
+#include <atomic>
+#include <cstdint>
+#include <cerrno>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/futex.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char FutexWaiter::kName[];
+#endif
+
+int FutexWaiter::WaitUntil(std::atomic<int32_t>* v, int32_t val,
+ KernelTimeout t) {
+#ifdef CLOCK_MONOTONIC
+ constexpr bool kHasClockMonotonic = true;
+#else
+ constexpr bool kHasClockMonotonic = false;
+#endif
+
+ // We can't call Futex::WaitUntil() here because the prodkernel implementation
+ // does not know about KernelTimeout::SupportsSteadyClock().
+ if (!t.has_timeout()) {
+ return Futex::Wait(v, val);
+ } else if (kHasClockMonotonic && KernelTimeout::SupportsSteadyClock() &&
+ t.is_relative_timeout()) {
+ auto rel_timespec = t.MakeRelativeTimespec();
+ return Futex::WaitRelativeTimeout(v, val, &rel_timespec);
+ } else {
+ auto abs_timespec = t.MakeAbsTimespec();
+ return Futex::WaitAbsoluteTimeout(v, val, &abs_timespec);
+ }
+}
+
+bool FutexWaiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ const int err = WaitUntil(&futex_, 0, t);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ Y_ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void FutexWaiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void FutexWaiter::Poke() {
+ // Wake one thread waiting on the futex.
+ const int err = Futex::Wake(&futex_, 1);
+ if (Y_ABSL_PREDICT_FALSE(err < 0)) {
+ Y_ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_INTERNAL_HAVE_FUTEX_WAITER
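Editor's note: the new FutexWaiter keeps a counting-semaphore protocol in futex_: Post() increments and wakes only when the count leaves zero, while Wait() tries to decrement a positive count with a CAS loop and parks on the futex only while it believes the count is zero. The consume step in miniature (illustrative, not the library code):

#include <atomic>
#include <cstdint>

bool TryConsume(std::atomic<int32_t>& sem) {
  int32_t x = sem.load(std::memory_order_relaxed);
  while (x != 0) {
    if (sem.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                  std::memory_order_relaxed)) {
      return true;  // consumed a pending wakeup
    }
    // compare_exchange_weak reloaded x on failure; retry
  }
  return false;  // count is zero; the caller would sleep on the futex
}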
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.h
new file mode 100644
index 0000000000..2ab269515e
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex_waiter.h
@@ -0,0 +1,63 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+#define Y_ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/config.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/futex.h"
+#include "y_absl/synchronization/internal/waiter_base.h"
+
+#ifdef Y_ABSL_INTERNAL_HAVE_FUTEX
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define Y_ABSL_INTERNAL_HAVE_FUTEX_WAITER 1
+
+class FutexWaiter : public WaiterCrtp<FutexWaiter> {
+ public:
+ FutexWaiter() : futex_(0) {}
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "FutexWaiter";
+
+ private:
+ // Atomically check that `*v == val`, and if it is, then sleep until the
+ // timeout `t` has been reached, or until woken by `Wake()`.
+ static int WaitUntil(std::atomic<int32_t>* v, int32_t val,
+ KernelTimeout t);
+
+ // Futexes are defined by specification to be 32-bits.
+ // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+};
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_INTERNAL_HAVE_FUTEX
+
+#endif // Y_ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
index 9d61ca0f2b..0053c1ed28 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
@@ -37,6 +37,7 @@
#include <algorithm>
#include <array>
+#include <cinttypes>
#include <limits>
#include "y_absl/base/internal/hide_ptr.h"
#include "y_absl/base/internal/raw_logging.h"
@@ -114,7 +115,7 @@ class Vec {
if (src->ptr_ == src->space_) {
// Need to actually copy
resize(src->size_);
- std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ std::copy_n(src->ptr_, src->size_, ptr_);
src->size_ = 0;
} else {
Discard();
@@ -148,7 +149,7 @@ class Vec {
size_t request = static_cast<size_t>(capacity_) * sizeof(T);
T* copy = static_cast<T*>(
base_internal::LowLevelAlloc::AllocWithArena(request, arena));
- std::copy(ptr_, ptr_ + size_, copy);
+ std::copy_n(ptr_, size_, copy);
Discard();
ptr_ = copy;
}
@@ -386,19 +387,22 @@ bool GraphCycles::CheckInvariants() const {
Node* nx = r->nodes_[x];
void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
- Y_ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ Y_ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %" PRIu32 " %p",
+ x, ptr);
}
if (nx->visited) {
- Y_ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ Y_ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %" PRIu32, x);
}
if (!ranks.insert(nx->rank)) {
- Y_ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ Y_ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %" PRId32, nx->rank);
}
HASH_FOR_EACH(y, nx->out) {
Node* ny = r->nodes_[static_cast<uint32_t>(y)];
if (nx->rank >= ny->rank) {
- Y_ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
- nx->rank, ny->rank);
+ Y_ABSL_RAW_LOG(FATAL,
+ "Edge %" PRIu32 " ->%" PRId32
+ " has bad rank assignment %" PRId32 "->%" PRId32,
+ x, y, nx->rank, ny->rank);
}
}
}
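Editor's note: the logging changes swap bare %u/%d for the <cinttypes> PRI macros because the underlying type of uint32_t/int32_t, and hence the correct printf specifier, varies by platform. A generic illustration (not from the patch):

#include <cinttypes>
#include <cstdio>

void LogEdge(uint32_t x, int32_t rank) {
  std::printf("node %" PRIu32 " rank %" PRId32 "\n", x, rank);
}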
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc
new file mode 100644
index 0000000000..f99ecb6725
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.cc
@@ -0,0 +1,225 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <limits>
+
+#include "y_absl/base/attributes.h"
+#include "y_absl/base/call_once.h"
+#include "y_absl/base/config.h"
+#include "y_absl/time/time.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr uint64_t KernelTimeout::kNoTimeout;
+constexpr int64_t KernelTimeout::kMaxNanos;
+#endif
+
+int64_t KernelTimeout::SteadyClockNow() {
+ if (!SupportsSteadyClock()) {
+ return y_absl::GetCurrentTimeNanos();
+ }
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::steady_clock::now().time_since_epoch())
+ .count();
+}
+
+KernelTimeout::KernelTimeout(y_absl::Time t) {
+ // `y_absl::InfiniteFuture()` is a common "no timeout" value and cheaper to
+ // compare than convert.
+ if (t == y_absl::InfiniteFuture()) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ int64_t unix_nanos = y_absl::ToUnixNanos(t);
+
+ // A timeout that lands before the unix epoch is converted to 0.
+ // In theory implementations should expire these timeouts immediately.
+ if (unix_nanos < 0) {
+ unix_nanos = 0;
+ }
+
+ // Values greater than or equal to kMaxNanos are converted to infinite.
+ if (unix_nanos >= kMaxNanos) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ rep_ = static_cast<uint64_t>(unix_nanos) << 1;
+}
+
+KernelTimeout::KernelTimeout(y_absl::Duration d) {
+ // `y_absl::InfiniteDuration()` is a common "no timeout" value and cheaper to
+ // compare than convert.
+ if (d == y_absl::InfiniteDuration()) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ int64_t nanos = y_absl::ToInt64Nanoseconds(d);
+
+ // Negative durations are normalized to 0.
+ // In theory implementations should expire these timeouts immediately.
+ if (nanos < 0) {
+ nanos = 0;
+ }
+
+ int64_t now = SteadyClockNow();
+ if (nanos > kMaxNanos - now) {
+ // Durations that would be greater than kMaxNanos are converted to infinite.
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ nanos += now;
+ rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
+}
+
+int64_t KernelTimeout::MakeAbsNanos() const {
+ if (!has_timeout()) {
+ return kMaxNanos;
+ }
+
+ int64_t nanos = RawAbsNanos();
+
+ if (is_relative_timeout()) {
+ // We need to change epochs, because the relative timeout might be
+ // represented by an absolute timestamp from another clock.
+ nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
+ int64_t now = y_absl::GetCurrentTimeNanos();
+ if (nanos > kMaxNanos - now) {
+ // Overflow.
+ nanos = kMaxNanos;
+ } else {
+ nanos += now;
+ }
+ } else if (nanos == 0) {
+ // Some callers have assumed that 0 means no timeout, so instead we return a
+ // time of 1 nanosecond after the epoch.
+ nanos = 1;
+ }
+
+ return nanos;
+}
+
+int64_t KernelTimeout::InNanosecondsFromNow() const {
+ if (!has_timeout()) {
+ return kMaxNanos;
+ }
+
+ int64_t nanos = RawAbsNanos();
+ if (is_absolute_timeout()) {
+ return std::max<int64_t>(nanos - y_absl::GetCurrentTimeNanos(), 0);
+ }
+ return std::max<int64_t>(nanos - SteadyClockNow(), 0);
+}
+
+struct timespec KernelTimeout::MakeAbsTimespec() const {
+ return y_absl::ToTimespec(y_absl::Nanoseconds(MakeAbsNanos()));
+}
+
+struct timespec KernelTimeout::MakeRelativeTimespec() const {
+ return y_absl::ToTimespec(y_absl::Nanoseconds(InNanosecondsFromNow()));
+}
+
+#ifndef _WIN32
+struct timespec KernelTimeout::MakeClockAbsoluteTimespec(clockid_t c) const {
+ if (!has_timeout()) {
+ return y_absl::ToTimespec(y_absl::Nanoseconds(kMaxNanos));
+ }
+
+ int64_t nanos = RawAbsNanos();
+ if (is_absolute_timeout()) {
+ nanos -= y_absl::GetCurrentTimeNanos();
+ } else {
+ nanos -= SteadyClockNow();
+ }
+
+ struct timespec now;
+ Y_ABSL_RAW_CHECK(clock_gettime(c, &now) == 0, "clock_gettime() failed");
+ y_absl::Duration from_clock_epoch =
+ y_absl::DurationFromTimespec(now) + y_absl::Nanoseconds(nanos);
+ if (from_clock_epoch <= y_absl::ZeroDuration()) {
+ // Some callers have assumed that 0 means no timeout, so instead we return a
+ // time of 1 nanosecond after the epoch. For safety we also do not return
+ // negative values.
+ return y_absl::ToTimespec(y_absl::Nanoseconds(1));
+ }
+ return y_absl::ToTimespec(from_clock_epoch);
+}
+#endif
+
+KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
+ constexpr DWord kInfinite = std::numeric_limits<DWord>::max();
+
+ if (!has_timeout()) {
+ return kInfinite;
+ }
+
+ constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
+ constexpr uint64_t kMaxValueNanos =
+ std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
+
+ uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
+ if (ns_from_now >= kMaxValueNanos) {
+ // Rounding up would overflow.
+ return kInfinite;
+ }
+ // Convert to milliseconds, always rounding up.
+ uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
+ }
+ return static_cast<DWord>(ms_from_now);
+}
+
+std::chrono::time_point<std::chrono::system_clock>
+KernelTimeout::ToChronoTimePoint() const {
+ if (!has_timeout()) {
+ return std::chrono::time_point<std::chrono::system_clock>::max();
+ }
+
+ // The cast to std::microseconds is because (on some platforms) the
+  // std::ratio used by std::chrono::system_clock doesn't convert to
+ // std::nanoseconds, so it doesn't compile.
+ auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
+ std::chrono::nanoseconds(MakeAbsNanos()));
+ return std::chrono::system_clock::from_time_t(0) + micros;
+}
+
+std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
+ if (!has_timeout()) {
+ return std::chrono::nanoseconds::max();
+ }
+ return std::chrono::nanoseconds(InNanosecondsFromNow());
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
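Editor's note: two conversion details in this new file are worth calling out. Relative timeouts are re-based onto the wall clock in MakeAbsNanos() with overflow saturating at kMaxNanos, and InMillisecondsFromNow() rounds up so a wait never returns before its deadline. The round-up step as plain arithmetic (illustrative):

#include <cstdint>

uint64_t CeilToMillis(uint64_t ns) {
  constexpr uint64_t kNanosInMillis = 1'000'000;
  // 1 ns -> 1 ms, 1'000'000 ns -> 1 ms, 1'000'001 ns -> 2 ms
  return (ns + kNanosInMillis - 1) / kNanosInMillis;
}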
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
index a39d09ca86..64466f80d9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
@@ -11,26 +11,21 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with y_absl::Time. Suitable for in-register
-// parameter-passing (e.g. syscalls.)
-// Constructible from a y_absl::Time (for a timeout to be respected) or {}
-// (for "no timeout".)
-// This is a private low-level API for use by a handful of low-level
-// components. Higher-level components should build APIs based on
-// y_absl::Time and y_absl::Duration.
#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#define Y_ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-#include <time.h>
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
#include <cstdint>
+#include <ctime>
#include <limits>
+#include "y_absl/base/config.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/time/clock.h"
#include "y_absl/time/time.h"
@@ -39,58 +34,73 @@ namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-class Waiter;
-
+// An optional timeout, with nanosecond granularity.
+//
+// This is a private low-level API for use by a handful of low-level
+// components. Higher-level components should build APIs based on
+// y_absl::Time and y_absl::Duration.
class KernelTimeout {
public:
- // A timeout that should expire at <t>. Any value, in the full
- // InfinitePast() to InfiniteFuture() range, is valid here and will be
- // respected.
- explicit KernelTimeout(y_absl::Time t) : ns_(MakeNs(t)) {}
- // No timeout.
- KernelTimeout() : ns_(0) {}
+ // Construct an absolute timeout that should expire at `t`.
+ explicit KernelTimeout(y_absl::Time t);
- // A more explicit factory for those who prefer it. Equivalent to {}.
- static KernelTimeout Never() { return {}; }
+ // Construct a relative timeout that should expire after `d`.
+ explicit KernelTimeout(y_absl::Duration d);
- // We explicitly do not support other custom formats: timespec, int64_t nanos.
- // Unify on this and y_absl::Time, please.
+ // Infinite timeout.
+ constexpr KernelTimeout() : rep_(kNoTimeout) {}
- bool has_timeout() const { return ns_ != 0; }
+ // A more explicit factory for those who prefer it.
+ // Equivalent to `KernelTimeout()`.
+ static constexpr KernelTimeout Never() { return KernelTimeout(); }
- // Convert to parameter for sem_timedwait/futex/similar. Only for approved
- // users. Do not call if !has_timeout.
+ // Returns true if there is a timeout that will eventually expire.
+ // Returns false if the timeout is infinite.
+ bool has_timeout() const { return rep_ != kNoTimeout; }
+
+  // If `has_timeout()` is true, returns true if the timeout was provided as a
+  // `y_absl::Time`. The return value is undefined if `has_timeout()` is false
+ // because all indefinite timeouts are equivalent.
+ bool is_absolute_timeout() const { return (rep_ & 1) == 0; }
+
+  // If `has_timeout()` is true, returns true if the timeout was provided as a
+  // `y_absl::Duration`. The return value is undefined if `has_timeout()` is false
+ // because all indefinite timeouts are equivalent.
+ bool is_relative_timeout() const { return (rep_ & 1) == 1; }
+
+ // Convert to `struct timespec` for interfaces that expect an absolute
+ // timeout. If !has_timeout() or is_relative_timeout(), attempts to convert to
+  // a reasonable absolute timeout, but callers should test has_timeout() and
+ // is_relative_timeout() and prefer to use a more appropriate interface.
struct timespec MakeAbsTimespec() const;
- // Convert to unix epoch nanos. Do not call if !has_timeout.
+ // Convert to `struct timespec` for interfaces that expect a relative
+ // timeout. If !has_timeout() or is_absolute_timeout(), attempts to convert to
+  // a reasonable relative timeout, but callers should test has_timeout() and
+ // is_absolute_timeout() and prefer to use a more appropriate interface. Since
+ // the return value is a relative duration, it should be recomputed by calling
+ // this method in the case of a spurious wakeup.
+ struct timespec MakeRelativeTimespec() const;
+
+#ifndef _WIN32
+ // Convert to `struct timespec` for interfaces that expect an absolute timeout
+ // on a specific clock `c`. This is similar to `MakeAbsTimespec()`, but
+ // callers usually want to use this method with `CLOCK_MONOTONIC` when
+ // relative timeouts are requested, and when the appropriate interface expects
+ // an absolute timeout relative to a specific clock (for example,
+ // pthread_cond_clockwait() or sem_clockwait()). If !has_timeout(), attempts
+  // to convert to a reasonable absolute timeout, but callers should test
+  // has_timeout() and prefer to use a more appropriate interface.
+ struct timespec MakeClockAbsoluteTimespec(clockid_t c) const;
+#endif
+
+ // Convert to unix epoch nanos for interfaces that expect an absolute timeout
+ // in nanoseconds. If !has_timeout() or is_relative_timeout(), attempts to
+  // convert to a reasonable absolute timeout, but callers should test
+ // has_timeout() and is_relative_timeout() and prefer to use a more
+ // appropriate interface.
int64_t MakeAbsNanos() const;
- private:
- // internal rep, not user visible: ns after unix epoch.
- // zero = no timeout.
- // Negative we treat as an unlikely (and certainly expired!) but valid
- // timeout.
- int64_t ns_;
-
- static int64_t MakeNs(y_absl::Time t) {
- // optimization--InfiniteFuture is common "no timeout" value
- // and cheaper to compare than convert.
- if (t == y_absl::InfiniteFuture()) return 0;
- int64_t x = ToUnixNanos(t);
-
- // A timeout that lands exactly on the epoch (x=0) needs to be respected,
- // so we alter it unnoticably to 1. Negative timeouts are in
- // theory supported, but handled poorly by the kernel (long
- // delays) so push them forward too; since all such times have
- // already passed, it's indistinguishable.
- if (x <= 0) x = 1;
- // A time larger than what can be represented to the kernel is treated
- // as no timeout.
- if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
- return x;
- }
-
-#ifdef _WIN32
// Converts to milliseconds from now, or INFINITE when
// !has_timeout(). For use by SleepConditionVariableSRW on
// Windows. Callers should recognize that the return value is a
@@ -100,68 +110,66 @@ class KernelTimeout {
// so we define our own DWORD and INFINITE instead of getting them from
// <intsafe.h> and <WinBase.h>.
typedef unsigned long DWord; // NOLINT
- DWord InMillisecondsFromNow() const {
- constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
- if (!has_timeout()) {
- return kInfinite;
- }
- // The use of y_absl::Now() to convert from absolute time to
- // relative time means that y_absl::Now() cannot use anything that
- // depends on KernelTimeout (for example, Mutex) on Windows.
- int64_t now = ToUnixNanos(y_absl::Now());
- if (ns_ >= now) {
- // Round up so that Now() + ms_from_now >= ns_.
- constexpr uint64_t max_nanos =
- (std::numeric_limits<int64_t>::max)() - 999999u;
- uint64_t ms_from_now =
- ((std::min)(max_nanos, static_cast<uint64_t>(ns_ - now)) + 999999u) /
- 1000000u;
- if (ms_from_now > kInfinite) {
- return kInfinite;
- }
- return static_cast<DWord>(ms_from_now);
- }
- return 0;
- }
-
- friend class Waiter;
-#endif
-};
+ DWord InMillisecondsFromNow() const;
+
+ // Convert to std::chrono::time_point for interfaces that expect an absolute
+ // timeout, like std::condition_variable::wait_until(). If !has_timeout() or
+ // is_relative_timeout(), attempts to convert to a reasonable absolute
+ // timeout, but callers should test has_timeout() and is_relative_timeout()
+ // and prefer to use a more appropriate interface.
+ std::chrono::time_point<std::chrono::system_clock> ToChronoTimePoint() const;
+
+ // Convert to std::chrono::time_point for interfaces that expect a relative
+ // timeout, like std::condition_variable::wait_for(). If !has_timeout() or
+ // is_absolute_timeout(), attempts to convert to a reasonable relative
+ // timeout, but callers should test has_timeout() and is_absolute_timeout()
+ // and prefer to use a more appropriate interface. Since the return value is a
+ // relative duration, it should be recomputed by calling this method in the
+ // case of a spurious wakeup.
+ std::chrono::nanoseconds ToChronoDuration() const;
+
+ // Returns true if steady (aka monotonic) clocks are supported by the system.
+ // This method exists because go/btm requires synchronized clocks, and
+ // thus requires we use the system (aka walltime) clock.
+ static constexpr bool SupportsSteadyClock() { return true; }
-inline struct timespec KernelTimeout::MakeAbsTimespec() const {
- int64_t n = ns_;
- static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
- if (n == 0) {
- Y_ABSL_RAW_LOG(
- ERROR, "Tried to create a timespec from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- n = (std::numeric_limits<int64_t>::max)();
- }
-
- // Kernel APIs validate timespecs as being at or after the epoch,
- // despite the kernel time type being signed. However, no one can
- // tell the difference between a timeout at or before the epoch (since
- // all such timeouts have expired!)
- if (n < 0) n = 0;
-
- struct timespec abstime;
- int64_t seconds = (std::min)(n / kNanosPerSecond,
- int64_t{(std::numeric_limits<time_t>::max)()});
- abstime.tv_sec = static_cast<time_t>(seconds);
- abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
- return abstime;
-}
-
-inline int64_t KernelTimeout::MakeAbsNanos() const {
- if (ns_ == 0) {
- Y_ABSL_RAW_LOG(
- ERROR, "Tried to create a timeout from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- return (std::numeric_limits<int64_t>::max)();
- }
-
- return ns_;
-}
+ private:
+ // Returns the current time, expressed as a count of nanoseconds since the
+ // epoch used by an arbitrary clock. The implementation tries to use a steady
+ // (monotonic) clock if one is available.
+ static int64_t SteadyClockNow();
+
+ // Internal representation.
+ // - If the value is kNoTimeout, then the timeout is infinite, and
+ // has_timeout() will return false.
+ // - If the low bit is 0, then the high 63 bits is the number of nanoseconds
+ // after the unix epoch.
+ // - If the low bit is 1, then the high 63 bits is the number of nanoseconds
+ // after the epoch used by SteadyClockNow().
+ //
+ // In all cases the time is stored as an absolute time; the only difference is
+ // the clock epoch. The use of an absolute time is important since, in the case
+ // of a relative timeout with a spurious wakeup, the program would have to
+ // restart the wait and thus needs a way of recomputing the remaining time.
+ uint64_t rep_;
+
+ // Returns the number of nanoseconds stored in the internal representation.
+ // When combined with the clock epoch indicated by the low bit (which is
+ // accessed through is_absolute_timeout() and is_relative_timeout()), the
+ // return value is used to compute when the timeout should occur.
+ int64_t RawAbsNanos() const { return static_cast<int64_t>(rep_ >> 1); }
+
+ // Converts to nanoseconds from now. Since the return value is a relative
+ // duration, it should be recomputed by calling this method in the case of a
+ // spurious wakeup.
+ int64_t InNanosecondsFromNow() const;
+
+ // A value that represents no timeout (or an infinite timeout).
+ static constexpr uint64_t kNoTimeout = (std::numeric_limits<uint64_t>::max)();
+
+ // The maximum value that can be stored in the high 63 bits.
+ static constexpr int64_t kMaxNanos = (std::numeric_limits<int64_t>::max)();
+};
} // namespace synchronization_internal
Y_ABSL_NAMESPACE_END
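
The tagged representation above packs a 63-bit absolute nanosecond count and a 1-bit clock selector into one word. A minimal stand-alone sketch of that encoding, with illustrative helper names rather than the class's real interface:

```cpp
#include <cstdint>
#include <limits>

// Hypothetical helpers mirroring the comment above: the low bit selects the
// clock epoch, the high 63 bits hold nanoseconds since that epoch.
constexpr uint64_t kNoTimeout = (std::numeric_limits<uint64_t>::max)();

uint64_t MakeRep(int64_t abs_nanos, bool steady_clock) {
  return (static_cast<uint64_t>(abs_nanos) << 1) |
         static_cast<uint64_t>(steady_clock);
}

bool HasTimeout(uint64_t rep) { return rep != kNoTimeout; }
int64_t RawAbsNanos(uint64_t rep) { return static_cast<int64_t>(rep >> 1); }
bool IsRelativeToSteadyClock(uint64_t rep) { return (rep & 1) != 0; }
```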
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
index d581d826af..9f29b2ca87 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
@@ -40,13 +40,6 @@ std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
return identity->blocked_count_ptr;
}
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
- new (Waiter::GetWaiter(identity)) Waiter();
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
@@ -54,7 +47,7 @@ void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
// Wakeup the waiting thread since it is time for it to become idle.
- Waiter::GetWaiter(identity)->Poke();
+ Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
}
}
@@ -64,11 +57,22 @@ Y_ABSL_NAMESPACE_END
extern "C" {
+Y_ABSL_ATTRIBUTE_WEAK void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+ y_absl::base_internal::ThreadIdentity *identity) {
+ new (y_absl::synchronization_internal::Waiter::GetWaiter(identity))
+ y_absl::synchronization_internal::Waiter();
+}
+
Y_ABSL_ATTRIBUTE_WEAK void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
y_absl::base_internal::ThreadIdentity *identity) {
y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
+Y_ABSL_ATTRIBUTE_WEAK void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+ y_absl::base_internal::ThreadIdentity *identity) {
+ y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
+}
+
Y_ABSL_ATTRIBUTE_WEAK bool Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
y_absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
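
Because these extension points are declared `Y_ABSL_ATTRIBUTE_WEAK`, a program can interpose on them by linking a strong definition of the same symbol. A sketch of such an override; the instrumentation hook is hypothetical:

```cpp
// Hypothetical strong override of the weak default above, e.g. for tracing.
// Linking this definition replaces the library's weak one.
extern "C" void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
    y_absl::base_internal::ThreadIdentity* identity) {
  TraceSemPoke(identity);  // Hypothetical instrumentation hook.
  y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
}
```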
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
index 6c7ae2f8c7..5a7ca91292 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
@@ -64,7 +64,7 @@ class PerThreadSem {
private:
// Create the PerThreadSem associated with "identity". Initializes count=0.
// REQUIRES: May only be called by ThreadIdentity.
- static void Init(base_internal::ThreadIdentity* identity);
+ static inline void Init(base_internal::ThreadIdentity* identity);
// Increments "identity"'s count.
static inline void Post(base_internal::ThreadIdentity* identity);
@@ -91,12 +91,21 @@ Y_ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
+void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+ y_absl::base_internal::ThreadIdentity* identity);
void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
y_absl::base_internal::ThreadIdentity* identity);
bool Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
y_absl::synchronization_internal::KernelTimeout t);
+void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+ y_absl::base_internal::ThreadIdentity* identity);
} // extern "C"
+void y_absl::synchronization_internal::PerThreadSem::Init(
+ y_absl::base_internal::ThreadIdentity* identity) {
+ Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity);
+}
+
void y_absl::synchronization_internal::PerThreadSem::Post(
y_absl::base_internal::ThreadIdentity* identity) {
Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc
new file mode 100644
index 0000000000..38b01235be
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.cc
@@ -0,0 +1,167 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/pthread_waiter.h"
+
+#ifdef Y_ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <cassert>
+#include <cerrno>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+namespace {
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+} // namespace
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char PthreadWaiter::kName[];
+#endif
+
+PthreadWaiter::PthreadWaiter() : waiter_count_(0), wakeup_count_(0) {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+}
+
+#ifdef __APPLE__
+#define Y_ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP 1
+#endif
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define Y_ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define Y_ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#endif
+
+// Calls pthread_cond_timedwait() or possibly something else like
+// pthread_cond_timedwait_relative_np() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return
+// value of pthread_cond_timedwait().
+int PthreadWaiter::TimedWait(KernelTimeout t) {
+ assert(t.has_timeout());
+ if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#ifdef Y_ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
+ const auto rel_timeout = t.MakeRelativeTimespec();
+ return pthread_cond_timedwait_relative_np(&cv_, &mu_, &rel_timeout);
+#elif defined(Y_ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT) && \
+ defined(CLOCK_MONOTONIC)
+ const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+ return pthread_cond_clockwait(&cv_, &mu_, CLOCK_MONOTONIC,
+ &abs_clock_timeout);
+#endif
+ }
+
+ const auto abs_timeout = t.MakeAbsTimespec();
+ return pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+}
+
+bool PthreadWaiter::Wait(KernelTimeout t) {
+ PthreadMutexHolder h(&mu_);
+ ++waiter_count_;
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = TimedWait(t);
+ if (err == ETIMEDOUT) {
+ --waiter_count_;
+ return false;
+ }
+ if (err != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "PthreadWaiter::TimedWait() failed: %d", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void PthreadWaiter::Post() {
+ PthreadMutexHolder h(&mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void PthreadWaiter::Poke() {
+ PthreadMutexHolder h(&mu_);
+ InternalCondVarPoke();
+}
+
+void PthreadWaiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ const int err = pthread_cond_signal(&cv_);
+ if (Y_ABSL_PREDICT_FALSE(err != 0)) {
+ Y_ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+ }
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_INTERNAL_HAVE_PTHREAD_WAITER
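
A usage sketch of the `Wait`/`Post` contract this file implements. This is an internal API; the `KernelTimeout` constructor taking a `y_absl::Duration` is assumed from kernel_timeout.h:

```cpp
#include "y_absl/synchronization/internal/kernel_timeout.h"
#include "y_absl/synchronization/internal/pthread_waiter.h"
#include "y_absl/time/time.h"

using y_absl::synchronization_internal::KernelTimeout;
using y_absl::synchronization_internal::PthreadWaiter;

// Returns true if another thread called w.Post() within 50ms, false if the
// relative timeout expired first.
bool WaitBriefly(PthreadWaiter& w) {
  return w.Wait(KernelTimeout(y_absl::Milliseconds(50)));
}
```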
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.h
new file mode 100644
index 0000000000..32cb31b01d
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/pthread_waiter.h
@@ -0,0 +1,60 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+#define Y_ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/waiter_base.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define Y_ABSL_INTERNAL_HAVE_PTHREAD_WAITER 1
+
+class PthreadWaiter : public WaiterCrtp<PthreadWaiter> {
+ public:
+ PthreadWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "PthreadWaiter";
+
+ private:
+ int TimedWait(KernelTimeout t);
+
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+};
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ndef _WIN32
+
+#endif // Y_ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc
new file mode 100644
index 0000000000..0f257e88b3
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.cc
@@ -0,0 +1,122 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/sem_waiter.h"
+
+#ifdef Y_ABSL_INTERNAL_HAVE_SEM_WAITER
+
+#include <semaphore.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <cerrno>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char SemWaiter::kName[];
+#endif
+
+SemWaiter::SemWaiter() : wakeups_(0) {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ Y_ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+}
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define Y_ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define Y_ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#endif
+
+// Calls sem_timedwait() or possibly something else like
+// sem_clockwait() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return value
+// of a call to sem_timedwait().
+int SemWaiter::TimedWait(KernelTimeout t) {
+ if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#if defined(Y_ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT) && defined(CLOCK_MONOTONIC)
+ const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+ return sem_clockwait(&sem_, CLOCK_MONOTONIC, &abs_clock_timeout);
+#endif
+ }
+
+ const auto abs_timeout = t.MakeAbsTimespec();
+ return sem_timedwait(&sem_, &abs_timeout);
+}
+
+bool SemWaiter::Wait(KernelTimeout t) {
+ // Loop until we timeout or consume a wakeup.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ Y_ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (TimedWait(t) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ Y_ABSL_RAW_LOG(FATAL, "SemWaiter::TimedWait() failed: %d", errno);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void SemWaiter::Post() {
+ // Post a wakeup.
+ if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void SemWaiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ Y_ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_INTERNAL_HAVE_SEM_WAITER
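
Note the memory-ordering hand-off above: `Post()` publishes each wakeup with a release increment that pairs with the acquire CAS in `Wait()`, while `Poke()` deliberately posts the semaphore without touching `wakeups_`, producing exactly the spurious wakeup the loop tolerates. A stand-alone sketch of the hand-off:

```cpp
#include <atomic>

std::atomic<int> wakeups{0};

// Publisher side: the release increment makes everything written before the
// post visible to the consumer that wins the CAS below.
void PostWakeup() { wakeups.fetch_add(1, std::memory_order_release); }

// Consumer side: acquire on success pairs with the release above; relaxed on
// failure since we simply retry with the freshly loaded value.
bool TryConsumeWakeup() {
  int x = wakeups.load(std::memory_order_relaxed);
  while (x != 0) {
    if (wakeups.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
      return true;
    }
  }
  return false;
}
```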
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.h
new file mode 100644
index 0000000000..f1d12140f0
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/sem_waiter.h
@@ -0,0 +1,65 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+#define Y_ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+
+#include "y_absl/base/config.h"
+
+#ifdef Y_ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/synchronization/internal/futex.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/waiter_base.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define Y_ABSL_INTERNAL_HAVE_SEM_WAITER 1
+
+class SemWaiter : public WaiterCrtp<SemWaiter> {
+ public:
+ SemWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "SemWaiter";
+
+ private:
+ int TimedWait(KernelTimeout t);
+
+ sem_t sem_;
+
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+};
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_HAVE_SEMAPHORE_H
+
+#endif // Y_ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc
new file mode 100644
index 0000000000..7eed1069c2
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.cc
@@ -0,0 +1,91 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/stdcpp_waiter.h"
+
+#ifdef Y_ABSL_INTERNAL_HAVE_STDCPP_WAITER
+
+#include <chrono> // NOLINT(build/c++11)
+#include <condition_variable> // NOLINT(build/c++11)
+#include <mutex> // NOLINT(build/c++11)
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char StdcppWaiter::kName[];
+#endif
+
+StdcppWaiter::StdcppWaiter() : waiter_count_(0), wakeup_count_(0) {}
+
+bool StdcppWaiter::Wait(KernelTimeout t) {
+ std::unique_lock<std::mutex> lock(mu_);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ cv_.wait(lock);
+ } else {
+ auto wait_result = t.SupportsSteadyClock() && t.is_relative_timeout()
+ ? cv_.wait_for(lock, t.ToChronoDuration())
+ : cv_.wait_until(lock, t.ToChronoTimePoint());
+ if (wait_result == std::cv_status::timeout) {
+ --waiter_count_;
+ return false;
+ }
+ }
+ first_pass = false;
+ }
+
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void StdcppWaiter::Post() {
+ std::lock_guard<std::mutex> lock(mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void StdcppWaiter::Poke() {
+ std::lock_guard<std::mutex> lock(mu_);
+ InternalCondVarPoke();
+}
+
+void StdcppWaiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ cv_.notify_one();
+ }
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_INTERNAL_HAVE_STDCPP_WAITER
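
The relative branch above calls `t.ToChronoDuration()` inside the loop on purpose: since `KernelTimeout` stores an absolute deadline, each call yields the remaining time, so spurious wakeups do not stretch the timeout. A generic sketch of the same loop shape, illustrative rather than the library's code:

```cpp
#include <chrono>
#include <condition_variable>
#include <mutex>

// `remaining` must recompute the time left from an absolute deadline on each
// call, exactly as ToChronoDuration() does above.
template <typename Pred, typename GetRemaining>
bool WaitWithDeadline(std::condition_variable& cv,
                      std::unique_lock<std::mutex>& lock, Pred ready,
                      GetRemaining remaining) {
  while (!ready()) {
    if (cv.wait_for(lock, remaining()) == std::cv_status::timeout) {
      return ready();  // One last check after timing out.
    }
  }
  return true;
}
```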
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.h
new file mode 100644
index 0000000000..552c57ae9b
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/stdcpp_waiter.h
@@ -0,0 +1,56 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+#define Y_ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+
+#include <condition_variable> // NOLINT(build/c++11)
+#include <mutex> // NOLINT(build/c++11)
+
+#include "y_absl/base/config.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/waiter_base.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define Y_ABSL_INTERNAL_HAVE_STDCPP_WAITER 1
+
+class StdcppWaiter : public WaiterCrtp<StdcppWaiter> {
+ public:
+ StdcppWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "StdcppWaiter";
+
+ private:
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ std::mutex mu_;
+ std::condition_variable cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+};
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
deleted file mode 100644
index 7c8c7988ae..0000000000
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "y_absl/synchronization/internal/waiter.h"
-
-#include "y_absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef Y_ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "y_absl/base/internal/raw_logging.h"
-#include "y_absl/base/internal/thread_identity.h"
-#include "y_absl/base/optimization.h"
-#include "y_absl/synchronization/internal/kernel_timeout.h"
-
-
-namespace y_absl {
-Y_ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
- base_internal::ThreadIdentity *identity =
- base_internal::CurrentThreadIdentityIfPresent();
- assert(identity != nullptr);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- const int ticker = identity->ticker.load(std::memory_order_relaxed);
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
- identity->is_idle.store(true, std::memory_order_relaxed);
- }
-}
-
-#if Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_FUTEX
-
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- // Loop until we can atomically decrement futex from a positive
- // value, waiting on a futex while we believe it is zero.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
-
- while (true) {
- int32_t x = futex_.load(std::memory_order_relaxed);
- while (x != 0) {
- if (!futex_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- return true; // Consumed a wakeup, we are done.
- }
-
- if (!first_pass) MaybeBecomeIdle();
- const int err = Futex::WaitUntil(&futex_, 0, t);
- if (err != 0) {
- if (err == -EINTR || err == -EWOULDBLOCK) {
- // Do nothing, the loop will retry.
- } else if (err == -ETIMEDOUT) {
- return false;
- } else {
- Y_ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- if (futex_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- // Wake one thread waiting on the futex.
- const int err = Futex::Wake(&futex_, 1);
- if (Y_ABSL_PREDICT_FALSE(err < 0)) {
- Y_ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
-}
-
-#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
- explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
- const int err = pthread_mutex_lock(mu_);
- if (err != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
- }
- }
-
- PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
- PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
- ~PthreadMutexHolder() {
- const int err = pthread_mutex_unlock(mu_);
- if (err != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
- }
- }
-
- private:
- pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
- const int err = pthread_mutex_init(&mu_, 0);
- if (err != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
- }
-
- const int err2 = pthread_cond_init(&cv_, 0);
- if (err2 != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
- }
-
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- PthreadMutexHolder h(&mu_);
- ++waiter_count_;
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!t.has_timeout()) {
- const int err = pthread_cond_wait(&cv_, &mu_);
- if (err != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
- }
- } else {
- const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
- if (err == ETIMEDOUT) {
- --waiter_count_;
- return false;
- }
- if (err != 0) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- PthreadMutexHolder h(&mu_);
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- PthreadMutexHolder h(&mu_);
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- const int err = pthread_cond_signal(&cv_);
- if (Y_ABSL_PREDICT_FALSE(err != 0)) {
- Y_ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
- }
- }
-}
-
-#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
- if (sem_init(&sem_, 0, 0) != 0) {
- Y_ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
- }
- wakeups_.store(0, std::memory_order_relaxed);
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- // Loop until we timeout or consume a wakeup.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (true) {
- int x = wakeups_.load(std::memory_order_relaxed);
- while (x != 0) {
- if (!wakeups_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- // Successfully consumed a wakeup, we're done.
- return true;
- }
-
- if (!first_pass) MaybeBecomeIdle();
- // Nothing to consume, wait (looping on EINTR).
- while (true) {
- if (!t.has_timeout()) {
- if (sem_wait(&sem_) == 0) break;
- if (errno == EINTR) continue;
- Y_ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
- } else {
- if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
- if (errno == EINTR) continue;
- if (errno == ETIMEDOUT) return false;
- Y_ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- // Post a wakeup.
- if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
- Y_ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
- }
-}
-
-#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
- static SRWLOCK *GetLock(Waiter *w) {
- return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
- }
-
- static CONDITION_VARIABLE *GetCond(Waiter *w) {
- return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
- }
-
- static_assert(sizeof(SRWLOCK) == sizeof(void *),
- "`mu_storage_` does not have the same size as SRWLOCK");
- static_assert(alignof(SRWLOCK) == alignof(void *),
- "`mu_storage_` does not have the same alignment as SRWLOCK");
-
- static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
- "`Y_ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
- "as `CONDITION_VARIABLE`");
- static_assert(
- alignof(CONDITION_VARIABLE) == alignof(void *),
- "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
- // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
- // and destructible because we never call their constructors or destructors.
- static_assert(std::is_trivially_constructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially constructible");
- static_assert(
- std::is_trivially_constructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially constructible");
- static_assert(std::is_trivially_destructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially destructible");
- static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
- explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
- AcquireSRWLockExclusive(mu_);
- }
-
- LockHolder(const LockHolder&) = delete;
- LockHolder& operator=(const LockHolder&) = delete;
-
- ~LockHolder() {
- ReleaseSRWLockExclusive(mu_);
- }
-
- private:
- SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
- auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
- auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
- InitializeSRWLock(mu);
- InitializeConditionVariable(cv);
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- SRWLOCK *mu = WinHelper::GetLock(this);
- CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
- LockHolder h(mu);
- ++waiter_count_;
-
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
- // GetLastError() returns a Win32 DWORD, but we assign to
- // unsigned long to simplify the Y_ABSL_RAW_LOG case below. The uniform
- // initialization guarantees this is not a narrowing conversion.
- const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
- if (err == ERROR_TIMEOUT) {
- --waiter_count_;
- return false;
- } else {
- Y_ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- LockHolder h(WinHelper::GetLock(this));
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- LockHolder h(WinHelper::GetLock(this));
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- WakeConditionVariable(WinHelper::GetCond(this));
- }
-}
-
-#else
-#error Unknown Y_ABSL_WAITER_MODE
-#endif
-
-} // namespace synchronization_internal
-Y_ABSL_NAMESPACE_END
-} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
index 17e81655fc..34ecc74337 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
@@ -17,142 +17,48 @@
#define Y_ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
#include "y_absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef Y_ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "y_absl/base/internal/thread_identity.h"
-#include "y_absl/synchronization/internal/futex.h"
-#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/futex_waiter.h"
+#include "y_absl/synchronization/internal/pthread_waiter.h"
+#include "y_absl/synchronization/internal/sem_waiter.h"
+#include "y_absl/synchronization/internal/stdcpp_waiter.h"
+#include "y_absl/synchronization/internal/win32_waiter.h"
// May be chosen at compile time via -DY_ABSL_FORCE_WAITER_MODE=<index>
#define Y_ABSL_WAITER_MODE_FUTEX 0
#define Y_ABSL_WAITER_MODE_SEM 1
#define Y_ABSL_WAITER_MODE_CONDVAR 2
#define Y_ABSL_WAITER_MODE_WIN32 3
+#define Y_ABSL_WAITER_MODE_STDCPP 4
#if defined(Y_ABSL_FORCE_WAITER_MODE)
#define Y_ABSL_WAITER_MODE Y_ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#elif defined(Y_ABSL_INTERNAL_HAVE_WIN32_WAITER)
#define Y_ABSL_WAITER_MODE Y_ABSL_WAITER_MODE_WIN32
-#elif defined(Y_ABSL_INTERNAL_HAVE_FUTEX)
+#elif defined(Y_ABSL_INTERNAL_HAVE_FUTEX_WAITER)
#define Y_ABSL_WAITER_MODE Y_ABSL_WAITER_MODE_FUTEX
-#elif defined(Y_ABSL_HAVE_SEMAPHORE_H)
+#elif defined(Y_ABSL_INTERNAL_HAVE_SEM_WAITER)
#define Y_ABSL_WAITER_MODE Y_ABSL_WAITER_MODE_SEM
-#else
+#elif defined(Y_ABSL_INTERNAL_HAVE_PTHREAD_WAITER)
#define Y_ABSL_WAITER_MODE Y_ABSL_WAITER_MODE_CONDVAR
+#else
+#error Y_ABSL_WAITER_MODE is undefined
#endif
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
- // Prepare any data to track waits.
- Waiter();
-
- // Not copyable or movable
- Waiter(const Waiter&) = delete;
- Waiter& operator=(const Waiter&) = delete;
-
- // Blocks the calling thread until a matching call to `Post()` or
- // `t` has passed. Returns `true` if woken (`Post()` called),
- // `false` on timeout.
- bool Wait(KernelTimeout t);
-
- // Restart the caller of `Wait()` as with a normal semaphore.
- void Post();
-
- // If anyone is waiting, wake them up temporarily and cause them to
- // call `MaybeBecomeIdle()`. They will then return to waiting for a
- // `Post()` or timeout.
- void Poke();
-
- // Returns the Waiter associated with the identity.
- static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
- static_assert(
- sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
- "Insufficient space for Waiter");
- return reinterpret_cast<Waiter*>(identity->waiter_state.data);
- }
-
- // How many periods to remain idle before releasing resources
-#ifndef Y_ABSL_HAVE_THREAD_SANITIZER
- static constexpr int kIdlePeriods = 60;
-#else
- // Memory consumption under ThreadSanitizer is a serious concern,
- // so we release resources sooner. The value of 1 leads to 1 to 2 second
- // delay before marking a thread as idle.
- static const int kIdlePeriods = 1;
-#endif
-
- private:
- // The destructor must not be called since Mutex/CondVar
- // can use PerThreadSem/Waiter after the thread exits.
- // Waiter objects are embedded in ThreadIdentity objects,
- // which are reused via a freelist and are never destroyed.
- ~Waiter() = delete;
-
#if Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_FUTEX
- // Futexes are defined by specification to be 32-bits.
- // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
- std::atomic<int32_t> futex_;
- static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_CONDVAR
- // REQUIRES: mu_ must be held.
- void InternalCondVarPoke();
-
- pthread_mutex_t mu_;
- pthread_cond_t cv_;
- int waiter_count_;
- int wakeup_count_; // Unclaimed wakeups.
-
+using Waiter = FutexWaiter;
#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_SEM
- sem_t sem_;
- // This seems superfluous, but for Poke() we need to cause spurious
- // wakeups on the semaphore. Hence we can't actually use the
- // semaphore's count.
- std::atomic<int> wakeups_;
-
+using Waiter = SemWaiter;
+#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_CONDVAR
+using Waiter = PthreadWaiter;
#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_WIN32
- // WinHelper - Used to define utilities for accessing the lock and
- // condition variable storage once the types are complete.
- class WinHelper;
-
- // REQUIRES: WinHelper::GetLock(this) must be held.
- void InternalCondVarPoke();
-
- // We can't include Windows.h in our headers, so we use aligned character
- // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
- // SRW locks and condition variables do not need to be explicitly destroyed.
- // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
- // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
- alignas(void*) unsigned char mu_storage_[sizeof(void*)];
- alignas(void*) unsigned char cv_storage_[sizeof(void*)];
- int waiter_count_;
- int wakeup_count_;
-
-#else
- #error Unknown Y_ABSL_WAITER_MODE
+using Waiter = Win32Waiter;
+#elif Y_ABSL_WAITER_MODE == Y_ABSL_WAITER_MODE_STDCPP
+using Waiter = StdcppWaiter;
#endif
-};
} // namespace synchronization_internal
Y_ABSL_NAMESPACE_END
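
With the platform classes split into their own headers, `Waiter` is now just a type alias, and a build can pin the implementation explicitly through the force macro checked above. A sketch of selecting and confirming the mode; the flag value indexes the mode macros defined in this header:

```cpp
// Force the portable std:: implementation at build time:
//   -DY_ABSL_FORCE_WAITER_MODE=4   // Y_ABSL_WAITER_MODE_STDCPP

#include <cstdio>

#include "y_absl/synchronization/internal/waiter.h"

// kName comes from the selected implementation, e.g. "StdcppWaiter".
void PrintSelectedWaiter() {
  std::printf("Waiter = %s\n",
              y_absl::synchronization_internal::Waiter::kName);
}
```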
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc
new file mode 100644
index 0000000000..8d7f99b49f
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.cc
@@ -0,0 +1,42 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/waiter_base.h"
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/thread_identity.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int WaiterBase::kIdlePeriods;
+#endif
+
+void WaiterBase::MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ }
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
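
The idle heuristic compares a free-running ticker against the tick recorded when the wait began. A stand-alone model of the check, with illustrative names:

```cpp
#include <atomic>

constexpr int kIdlePeriods = 60;  // 1 under ThreadSanitizer, as above.

// Model of MaybeBecomeIdle(): a thread that has been waiting for more than
// kIdlePeriods ticks, and is not already idle, should become idle.
bool ShouldBecomeIdle(const std::atomic<int>& ticker,
                      const std::atomic<int>& wait_start,
                      const std::atomic<bool>& is_idle) {
  const int t = ticker.load(std::memory_order_relaxed);
  const int start = wait_start.load(std::memory_order_relaxed);
  return !is_idle.load(std::memory_order_relaxed) &&
         (t - start > kIdlePeriods);
}
```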
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.h
new file mode 100644
index 0000000000..e42df63bbf
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter_base.h
@@ -0,0 +1,90 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+#define Y_ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// `Waiter` is a platform-specific semaphore implementation that `PerThreadSem`
+// waits on to implement blocking in `y_absl::Mutex`. Implementations should
+// inherit from `WaiterCrtp` and must implement `Wait()`, `Post()`, and `Poke()`
+// as described in `WaiterBase`. `waiter.h` selects the implementation and uses
+// static dispatch for performance.
+class WaiterBase {
+ public:
+ WaiterBase() = default;
+
+ // Not copyable or movable
+ WaiterBase(const WaiterBase&) = delete;
+ WaiterBase& operator=(const WaiterBase&) = delete;
+
+ // Blocks the calling thread until a matching call to `Post()` occurs or
+ // `t` has passed. Returns `true` if woken (`Post()` called),
+ // `false` on timeout.
+ //
+ // bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ //
+ // void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ //
+ // void Poke();
+
+ // Returns the name of this implementation. Used only for debugging.
+ //
+ // static constexpr char kName[];
+
+ // How many periods to remain idle before releasing resources
+#ifndef Y_ABSL_HAVE_THREAD_SANITIZER
+ static constexpr int kIdlePeriods = 60;
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+ // so we release resources sooner. The value of 1 leads to 1 to 2 second
+ // delay before marking a thread as idle.
+ static constexpr int kIdlePeriods = 1;
+#endif
+
+ protected:
+ static void MaybeBecomeIdle();
+};
+
+template <typename T>
+class WaiterCrtp : public WaiterBase {
+ public:
+ // Returns the Waiter associated with the identity.
+ static T* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(T) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<T*>(identity->waiter_state.data);
+ }
+};
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
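
Because `GetWaiter()` returns the derived type, call sites bind statically with no virtual table; this mirrors the call pattern used in per_thread_sem.cc:

```cpp
#include "y_absl/base/internal/thread_identity.h"
#include "y_absl/synchronization/internal/waiter.h"

// Waiter is the concrete platform type selected in waiter.h, so Post()
// resolves at compile time rather than through a vtable.
void PostToThread(y_absl::base_internal::ThreadIdentity* identity) {
  y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
```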
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc
new file mode 100644
index 0000000000..055d722bd1
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.cc
@@ -0,0 +1,151 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/synchronization/internal/win32_waiter.h"
+
+#ifdef Y_ABSL_INTERNAL_HAVE_WIN32_WAITER
+
+#include <windows.h>
+
+#include "y_absl/base/config.h"
+#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/thread_identity.h"
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef Y_ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char Win32Waiter::kName[];
+#endif
+
+class Win32Waiter::WinHelper {
+ public:
+ static SRWLOCK *GetLock(Win32Waiter *w) {
+ return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+ }
+
+ static CONDITION_VARIABLE *GetCond(Win32Waiter *w) {
+ return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+ }
+
+ static_assert(sizeof(SRWLOCK) == sizeof(void *),
+ "`mu_storage_` does not have the same size as SRWLOCK");
+ static_assert(alignof(SRWLOCK) == alignof(void *),
+ "`mu_storage_` does not have the same alignment as SRWLOCK");
+
+ static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
+ "`Y_ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
+ "as `CONDITION_VARIABLE`");
+ static_assert(
+ alignof(CONDITION_VARIABLE) == alignof(void *),
+ "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
+
+ // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+ // and destructible because we never call their constructors or destructors.
+ static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially constructible");
+ static_assert(
+ std::is_trivially_constructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially constructible");
+ static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially destructible");
+ static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially destructible");
+};
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+Win32Waiter::Win32Waiter() {
+ auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+ auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+ InitializeSRWLock(mu);
+ InitializeConditionVariable(cv);
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+bool Win32Waiter::Wait(KernelTimeout t) {
+ SRWLOCK *mu = WinHelper::GetLock(this);
+ CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+ LockHolder h(mu);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the Y_ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ --waiter_count_;
+ return false;
+ } else {
+ Y_ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Win32Waiter::Post() {
+ LockHolder h(WinHelper::GetLock(this));
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Win32Waiter::Poke() {
+ LockHolder h(WinHelper::GetLock(this));
+ InternalCondVarPoke();
+}
+
+void Win32Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ WakeConditionVariable(WinHelper::GetCond(this));
+ }
+}
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // Y_ABSL_INTERNAL_HAVE_WIN32_WAITER
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.h
new file mode 100644
index 0000000000..6e4f76af24
--- /dev/null
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/win32_waiter.h
@@ -0,0 +1,70 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef Y_ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+#define Y_ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#endif
+
+#if defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#include "y_absl/base/config.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+#include "y_absl/synchronization/internal/waiter_base.h"
+
+namespace y_absl {
+Y_ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define Y_ABSL_INTERNAL_HAVE_WIN32_WAITER 1
+
+class Win32Waiter : public WaiterCrtp<Win32Waiter> {
+ public:
+ Win32Waiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "Win32Waiter";
+
+ private:
+ // WinHelper - Used to define utilities for accessing the lock and
+ // condition variable storage once the types are complete.
+ class WinHelper;
+
+ // REQUIRES: WinHelper::GetLock(this) must be held.
+ void InternalCondVarPoke();
+
+ // We can't include Windows.h in our headers, so we use aligned character
+ // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ // SRW locks and condition variables do not need to be explicitly destroyed.
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+ // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+ alignas(void*) unsigned char mu_storage_[sizeof(void*)];
+ alignas(void*) unsigned char cv_storage_[sizeof(void*)];
+ int waiter_count_;
+ int wakeup_count_;
+};
+
+} // namespace synchronization_internal
+Y_ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#endif // Y_ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
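
The aligned byte buffers above implement an opaque-storage idiom: the header reserves correctly sized and aligned raw storage, and the .cc placement-news the real Windows types once <windows.h> is available. A generic sketch of the idiom, with illustrative names:

```cpp
#include <new>
#include <type_traits>

struct OpaqueSlot {
  alignas(void*) unsigned char storage[sizeof(void*)];
};

// Constructs T inside the slot; T must fit and, like SRWLOCK and
// CONDITION_VARIABLE above, be trivially destructible since no destructor
// will ever run.
template <typename T>
T* EmplaceIn(OpaqueSlot* slot) {
  static_assert(sizeof(T) <= sizeof(slot->storage), "slot too small");
  static_assert(alignof(T) <= alignof(void*), "slot under-aligned");
  static_assert(std::is_trivially_destructible<T>::value,
                "T must be trivially destructible");
  return ::new (static_cast<void*>(slot->storage)) T;
}
```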
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
index 43ab36fe07..004fd4c06f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
@@ -35,10 +35,9 @@
#include <algorithm>
#include <atomic>
-#include <cinttypes>
#include <cstddef>
+#include <cstdlib>
#include <cstring>
-#include <iterator>
#include <thread> // NOLINT(build/c++11)
#include "y_absl/base/attributes.h"
@@ -55,7 +54,6 @@
#include "y_absl/base/internal/thread_identity.h"
#include "y_absl/base/internal/tsan_mutex_interface.h"
#include "y_absl/base/optimization.h"
-#include "y_absl/base/port.h"
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/debugging/symbolize.h"
#include "y_absl/synchronization/internal/graphcycles.h"
@@ -63,6 +61,7 @@
#include "y_absl/time/time.h"
using y_absl::base_internal::CurrentThreadIdentityIfPresent;
+using y_absl::base_internal::CycleClock;
using y_absl::base_internal::PerThreadSynch;
using y_absl::base_internal::SchedulingGuard;
using y_absl::base_internal::ThreadIdentity;
@@ -98,18 +97,15 @@ Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
y_absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES y_absl::base_internal::AtomicHook<void (*)(
- const char *msg, const void *obj, int64_t wait_cycles)>
+ const char* msg, const void* obj, int64_t wait_cycles)>
mutex_tracer;
Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- y_absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
- cond_var_tracer;
-Y_ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES y_absl::base_internal::AtomicHook<
- bool (*)(const void *pc, char *out, int out_size)>
- symbolizer(y_absl::Symbolize);
+y_absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
+ cond_var_tracer;
} // namespace
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock);
@@ -117,19 +113,15 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles)) {
mutex_tracer.Store(fn);
}
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
cond_var_tracer.Store(fn);
}
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
- symbolizer.Store(fn);
-}
-
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
@@ -148,25 +140,24 @@ y_absl::Duration MeasureTimeToYield() {
return y_absl::Now() - before;
}
-const MutexGlobals &GetMutexGlobals() {
+const MutexGlobals& GetMutexGlobals() {
Y_ABSL_CONST_INIT static MutexGlobals data;
y_absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = y_absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep.
- // Real-time threads are often unable to yield, so the sleep time needs
- // to be long enough to keep the calling thread asleep until scheduling
- // happens.
- // If this is multiprocessor, allow spinning. If the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
+ if (y_absl::base_internal::NumCPUs() > 1) {
+ // If this is multiprocessor, allow spinning. If the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+ // is used to ensure that an Unlock() call, which must get the spin lock
+ // for any thread to make progress, gets it without undue delay.
+ data.spinloop_iterations = 1500;
data.mutex_sleep_spins[AGGRESSIVE] = 5000;
data.mutex_sleep_spins[GENTLE] = 250;
data.mutex_sleep_time = y_absl::Microseconds(10);
} else {
+ // If this is a uniprocessor, only yield/sleep. Real-time threads are often
+ // unable to yield, so the sleep time needs to be long enough to keep
+ // the calling thread asleep until scheduling happens.
+ data.spinloop_iterations = 0;
data.mutex_sleep_spins[AGGRESSIVE] = 0;
data.mutex_sleep_spins[GENTLE] = 0;
data.mutex_sleep_time = MeasureTimeToYield() * 5;
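
GetMutexGlobals() above is the lazy-singleton idiom: the tuning constants are computed exactly once, on first use, with a low-level call-once so the values are ready before static initialization completes and the computation never re-enters Mutex itself. A minimal sketch of the same shape, assuming std::call_once in place of y_absl::base_internal::LowLevelCallOnce; the CPU-count split and the spin values mirror the hunk above:

#include <mutex>
#include <thread>

struct MutexGlobals {
  std::once_flag once;
  int spinloop_iterations = 0;
  int mutex_sleep_spins[2] = {0, 0};  // [AGGRESSIVE, GENTLE]
};

const MutexGlobals& GetGlobals() {
  static MutexGlobals data;
  std::call_once(data.once, [&]() {
    if (std::thread::hardware_concurrency() > 1) {
      // Multiprocessor: spinning can win, because the holder may release
      // the lock from another CPU while we spin.
      data.spinloop_iterations = 1500;
      data.mutex_sleep_spins[0] = 5000;  // AGGRESSIVE
      data.mutex_sleep_spins[1] = 250;   // GENTLE
    } else {
      // Uniprocessor: spinning only burns the quantum the holder needs.
      data.spinloop_iterations = 0;
    }
  });
  return data;
}
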
@@ -219,8 +210,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != bits &&
((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v | bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
std::memory_order_relaxed)));
}
@@ -235,8 +225,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != 0 &&
((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v & ~bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
std::memory_order_relaxed)));
}
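
Both reformatted helpers share one idiom: load the word relaxed, then compare_exchange_weak a modified copy with release ordering, looping until the bits reach the desired state or the caller's side condition holds. A standalone sketch of the set-bits variant, without the wait_until_clear refinement:

#include <atomic>
#include <cstdint>

// Atomically set `bits` in *pv.  compare_exchange_weak reloads `v` on
// failure, so each retry works on a fresh value; a weak CAS is fine here
// because a spurious failure just goes around the loop again.
void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits) {
  intptr_t v = pv->load(std::memory_order_relaxed);
  while ((v & bits) != bits &&
         !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
                                    std::memory_order_relaxed)) {
    // v was refreshed by the failed CAS; try again.
  }
}
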
@@ -251,7 +240,7 @@ void ResetDeadlockGraphMu() {
}
// Graph used to detect deadlocks.
-Y_ABSL_CONST_INIT static GraphCycles *deadlock_graph
+Y_ABSL_CONST_INIT static GraphCycles* deadlock_graph
Y_ABSL_GUARDED_BY(deadlock_graph_mu) Y_ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
@@ -295,7 +284,7 @@ enum { // Event flags
// Properties of the events.
static const struct {
int flags;
- const char *msg;
+ const char* msg;
} event_properties[] = {
{SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
{0, "TryLock failed "},
@@ -320,12 +309,12 @@ Y_ABSL_CONST_INIT static y_absl::base_internal::SpinLock synch_event_mu(
// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
-static struct SynchEvent { // this is a trivial hash table for the events
+static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
int refcount Y_ABSL_GUARDED_BY(synch_event_mu);
// buckets have linear, 0-terminated chains
- SynchEvent *next Y_ABSL_GUARDED_BY(synch_event_mu);
+ SynchEvent* next Y_ABSL_GUARDED_BY(synch_event_mu);
// Constant after initialization
uintptr_t masked_addr; // object at this address is called "name"
@@ -333,13 +322,13 @@ static struct SynchEvent { // this is a trivial hash table for the events
// No explicit synchronization used. Instead we assume that the
// client who enables/disables invariants/logging on a Mutex does so
// while the Mutex is not being concurrently accessed by others.
- void (*invariant)(void *arg); // called on each event
- void *arg; // first arg to (*invariant)()
- bool log; // logging turned on
+ void (*invariant)(void* arg); // called on each event
+ void* arg; // first arg to (*invariant)()
+ bool log; // logging turned on
// Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] Y_ABSL_GUARDED_BY(synch_event_mu);
+ char name[1]; // actually longer---NUL-terminated string
+}* synch_event[kNSynchEvent] Y_ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing
@@ -348,11 +337,11 @@ static struct SynchEvent { // this is a trivial hash table for the events
// the string name is copied into it.
// When used with a mutex, the caller should also ensure that kMuEvent
// is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
- const char *name, intptr_t bits,
+static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
+ const char* name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
// first look for existing SynchEvent struct.
synch_event_mu.Lock();
for (e = synch_event[h];
@@ -364,9 +353,9 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
name = "";
}
size_t l = strlen(name);
- e = reinterpret_cast<SynchEvent *>(
+ e = reinterpret_cast<SynchEvent*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
- e->refcount = 2; // one for return value, one for linked list
+ e->refcount = 2; // one for return value, one for linked list
e->masked_addr = base_internal::HidePtr(addr);
e->invariant = nullptr;
e->arg = nullptr;
@@ -376,19 +365,19 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
AtomicSetBits(addr, bits, lockbit);
synch_event[h] = e;
} else {
- e->refcount++; // for return value
+ e->refcount++; // for return value
}
synch_event_mu.Unlock();
return e;
}
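
EnsureSynchEvent() above is a lookup-or-insert on a tiny intrusive chained hash table: a fixed prime bucket count, singly linked chains, one lock for the whole table, and a refcount per entry (one reference owned by the chain, one handed to the caller). A minimal sketch of that shape, with a std::mutex standing in for synch_event_mu and plain new in place of LowLevelAlloc; the variable-length name tail is omitted:

#include <cstdint>
#include <mutex>

struct Event {
  int refcount;    // guarded by g_mu
  Event* next;     // bucket chain, guarded by g_mu
  uintptr_t key;   // constant after initialization
};

constexpr uint32_t kBuckets = 1031;  // prime, as in kNSynchEvent
Event* g_table[kBuckets];
std::mutex g_mu;

Event* EnsureEvent(uintptr_t key) {
  uint32_t h = key % kBuckets;
  std::lock_guard<std::mutex> hold(g_mu);
  Event* e = g_table[h];
  while (e != nullptr && e->key != key) e = e->next;  // existing entry?
  if (e == nullptr) {
    e = new Event{/*refcount=*/2, g_table[h], key};  // chain ref + caller ref
    g_table[h] = e;
  } else {
    e->refcount++;  // caller ref only
  }
  return e;
}
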
// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
+static void DeleteSynchEvent(SynchEvent* e) {
base_internal::LowLevelAlloc::Free(e);
}
// Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
+static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) {
synch_event_mu.Lock();
bool del = (--(e->refcount) == 0);
@@ -402,11 +391,11 @@ static void UnrefSynchEvent(SynchEvent *e) {
// Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent **pe;
- SynchEvent *e;
+ SynchEvent** pe;
+ SynchEvent* e;
synch_event_mu.Lock();
for (pe = &synch_event[h];
(e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -427,9 +416,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
-static SynchEvent *GetSynchEvent(const void *addr) {
+static SynchEvent* GetSynchEvent(const void* addr) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
synch_event_mu.Lock();
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -444,17 +433,17 @@ static SynchEvent *GetSynchEvent(const void *addr) {
// Called when an event "ev" occurs on a Mutex or CondVar "obj"
// if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
- SynchEvent *e = GetSynchEvent(obj);
+static void PostSynchEvent(void* obj, int ev) {
+ SynchEvent* e = GetSynchEvent(obj);
// logging is on if event recording is on and either there's no event struct,
// or it explicitly says to log
if (e == nullptr || e->log) {
- void *pcs[40];
+ void* pcs[40];
int n = y_absl::GetStackTrace(pcs, Y_ABSL_ARRAYSIZE(pcs), 1);
// A buffer with enough space for the ASCII for all the PCs, even on a
// 64-bit machine.
char buffer[Y_ABSL_ARRAYSIZE(pcs) * 24];
- int pos = snprintf(buffer, sizeof (buffer), " @");
+ int pos = snprintf(buffer, sizeof(buffer), " @");
for (int i = 0; i != n; i++) {
int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
" %p", pcs[i]);
@@ -476,13 +465,13 @@ static void PostSynchEvent(void *obj, int ev) {
// get false positive race reports later.
// Reuse EvalConditionAnnotated to properly call into user code.
struct local {
- static bool pred(SynchEvent *ev) {
+ static bool pred(SynchEvent* ev) {
(*ev->invariant)(ev->arg);
return false;
}
};
Condition cond(&local::pred, e);
- Mutex *mu = static_cast<Mutex *>(obj);
+ Mutex* mu = static_cast<Mutex*>(obj);
const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
const bool trylock = (flags & SYNCH_F_TRY) != 0;
const bool read_lock = (flags & SYNCH_F_R) != 0;
@@ -508,32 +497,32 @@ static void PostSynchEvent(void *obj, int ev) {
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue.
struct SynchWaitParams {
- SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
- KernelTimeout timeout_arg, Mutex *cvmu_arg,
- PerThreadSynch *thread_arg,
- std::atomic<intptr_t> *cv_word_arg)
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
+ KernelTimeout timeout_arg, Mutex* cvmu_arg,
+ PerThreadSynch* thread_arg,
+ std::atomic<intptr_t>* cv_word_arg)
: how(how_arg),
cond(cond_arg),
timeout(timeout_arg),
cvmu(cvmu_arg),
thread(thread_arg),
cv_word(cv_word_arg),
- contention_start_cycles(base_internal::CycleClock::Now()),
+ contention_start_cycles(CycleClock::Now()),
should_submit_contention_data(false) {}
const Mutex::MuHow how; // How this thread needs to wait.
- const Condition *cond; // The condition that this thread is waiting for.
- // In Mutex, this field is set to zero if a timeout
- // expires.
+ const Condition* cond; // The condition that this thread is waiting for.
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
KernelTimeout timeout; // timeout expiry---absolute time
// In Mutex, this field is set to zero if a timeout
// expires.
- Mutex *const cvmu; // used for transfer from cond var to mutex
- PerThreadSynch *const thread; // thread that is waiting
+ Mutex* const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch* const thread; // thread that is waiting
// If not null, thread should be enqueued on the CondVar whose state
// word is cv_word instead of queueing normally on the Mutex.
- std::atomic<intptr_t> *cv_word;
+ std::atomic<intptr_t>* cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
// to contend for the mutex.
@@ -541,12 +530,12 @@ struct SynchWaitParams {
};
struct SynchLocksHeld {
- int n; // number of valid entries in locks[]
- bool overflow; // true iff we overflowed the array at some point
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
struct {
- Mutex *mu; // lock acquired
- int32_t count; // times acquired
- GraphId id; // deadlock_graph id of acquired lock
+ Mutex* mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
} locks[40];
// If a thread overfills the array during deadlock detection, we
// continue, discarding information as needed. If no overflow has
@@ -556,11 +545,11 @@ struct SynchLocksHeld {
// A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
- reinterpret_cast<PerThreadSynch *>(1);
+static PerThreadSynch* const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch*>(1);
-static SynchLocksHeld *LocksHeldAlloc() {
- SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+static SynchLocksHeld* LocksHeldAlloc() {
+ SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
ret->n = 0;
ret->overflow = false;
@@ -568,24 +557,24 @@ static SynchLocksHeld *LocksHeldAlloc() {
}
// Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
- ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+static PerThreadSynch* Synch_GetPerThread() {
+ ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
return &identity->per_thread_synch;
}
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
if (mu) {
Y_ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
- PerThreadSynch *w = Synch_GetPerThread();
+ PerThreadSynch* w = Synch_GetPerThread();
if (mu) {
Y_ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
return w;
}
-static SynchLocksHeld *Synch_GetAllLocks() {
- PerThreadSynch *s = Synch_GetPerThread();
+static SynchLocksHeld* Synch_GetAllLocks() {
+ PerThreadSynch* s = Synch_GetPerThread();
if (s->all_locks == nullptr) {
s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
}
@@ -593,7 +582,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
if (mu) {
Y_ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
// We miss synchronization around passing PerThreadSynch between threads
@@ -609,7 +598,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
}
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
if (mu) {
Y_ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -630,7 +619,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
// Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
// Fix the per-thread state only if it exists.
- ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
if (identity != nullptr) {
identity->per_thread_synch.suppress_fatal_errors = true;
}
@@ -639,21 +628,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
std::memory_order_release);
}
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static y_absl::Time DeadlineFromTimeout(y_absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return y_absl::TimeFromTimeval(tv) + timeout;
-#else
- return y_absl::Now() + timeout;
-#endif
-}
-
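
The helper deleted above eagerly collapsed every relative timeout into an absolute deadline (gettimeofday-based off Windows, to match the clock PerThreadSem::Wait() uses), so the Duration overloads could only ever wait on absolute times. After this patch they construct a KernelTimeout from the Duration directly, preserving the caller's intent for a relative wait where the OS supports one. For reference, the non-gettimeofday half of the removed conversion was just this:

#include "y_absl/time/clock.h"
#include "y_absl/time/time.h"

// Old shape: turn "wait this long" into "wait until then" at the call site.
static y_absl::Time DeadlineFromTimeout(y_absl::Duration timeout) {
  return y_absl::Now() + timeout;
}
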
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -664,24 +638,29 @@ static y_absl::Time DeadlineFromTimeout(y_absl::Duration timeout) {
// bit-twiddling trick in Mutex::Unlock().
// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
// to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
-static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
-static const intptr_t kMuWait = 0x0004L; // threads are waiting
-static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
-static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+// There's a designated waker.
// INVARIANT1: there's a thread that was blocked on the mutex, is
// no longer, yet has not yet acquired the mutex. If there's a
// designated waker, all threads can avoid taking the slow path in
// unlock because the designated waker will subsequently acquire
// the lock and wake someone. To maintain INVARIANT1 the bit is
// set when a thread is unblocked (INV1a), and threads that were
-// unblocked reset the bit when they either acquire or re-block
-// (INV1b).
-static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
- // for a reader
-static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
-static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
-static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+// unblocked reset the bit when they either acquire or re-block (INV1b).
+static const intptr_t kMuDesig = 0x0002L;
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// Runnable writer is waiting for a reader.
+// If set, new readers will not lock the mutex to avoid writer starvation.
+// Note: if a reader has higher priority than the writer, it will still lock
+// the mutex ahead of the waiting writer, but in a very inefficient manner:
+// the reader will first queue itself and block, but then the last unlocking
+// reader will wake it.
+static const intptr_t kMuWrWait = 0x0020L;
+static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
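
The constants above pack the whole mutex into one word: the low byte (kMuLow) holds the mode and event bits, and kMuHigh holds either the waiter-list pointer or the reader count. The two alignments the surrounding comment calls out are checkable in isolation; a pair of static_asserts over the same values:

#include <cstdint>

constexpr intptr_t kMuReader = 0x0001L, kMuWait = 0x0004L,
                   kMuWriter = 0x0008L, kMuWrWait = 0x0020L;

// kMuWriter/kMuReader == kMuWrWait/kMuWait: both pairs sit three bits
// apart, so one shift-and-mask can test both corruption patterns at once
// (see CheckForMutexCorruption() further down).
static_assert((kMuReader << 3) == kMuWriter, "reader/writer spacing");
static_assert((kMuWait << 3) == kMuWrWait, "wait/wrwait spacing");
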
// Hack to make constant values available to gdb pretty printer
enum {
@@ -777,8 +756,8 @@ Mutex::~Mutex() {
Y_ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
-void Mutex::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+void Mutex::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
}
@@ -787,11 +766,10 @@ void EnableMutexInvariantDebugging(bool enabled) {
synch_check_invariants.store(enabled, std::memory_order_release);
}
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
- void *arg) {
+void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
e->invariant = invariant;
e->arg = arg;
UnrefSynchEvent(e);
@@ -807,15 +785,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
// waiters with the same condition, type of lock, and thread priority.
//
// Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
// Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
- return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
}
// The next several routines maintain the per-thread next and skip fields
@@ -873,17 +851,17 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// except those in the added node and the former "head" node. This implies
// that the new node is added after head, and so must be the new head or the
// new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
- PerThreadSynch *x0 = nullptr;
- PerThreadSynch *x1 = x;
- PerThreadSynch *x2 = x->skip;
+static PerThreadSynch* Skip(PerThreadSynch* x) {
+ PerThreadSynch* x0 = nullptr;
+ PerThreadSynch* x1 = x;
+ PerThreadSynch* x2 = x->skip;
if (x2 != nullptr) {
// Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
// such that x1 == x0->skip && x2 == x1->skip
while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
- x0->skip = x2; // short-circuit skip from x0 to x2
+ x0->skip = x2; // short-circuit skip from x0 to x2
}
- x->skip = x1; // short-circuit skip from x to result
+ x->skip = x1; // short-circuit skip from x to result
}
return x1;
}
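
Skip() above is path compression over the skip fields: walk x, x->skip, x->skip->skip, ... to the end of the equivalence chain, then point every visited node (and x itself) at the result, so later walks over the same chain are amortized O(1). It is the same shape as find-with-path-compression in a union-find; a minimal sketch on a plain node type:

struct Node {
  Node* skip = nullptr;  // same-class successor shortcut; null at chain end
};

// Return the last node in x's skip chain, short-circuiting every skip
// pointer seen along the way (path compression, as in union-find's find).
Node* SkipToEnd(Node* x) {
  Node* x0 = nullptr;
  Node* x1 = x;
  Node* x2 = x->skip;
  if (x2 != nullptr) {
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
      x0->skip = x2;  // grandparent shortcut: x0 now skips over x1
    }
    x->skip = x1;  // x points straight at the chain's end
  }
  return x1;
}
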
@@ -892,7 +870,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) {
// The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
if (to_be_removed->skip != nullptr) {
ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
@@ -904,7 +882,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
}
}
-static void CondVarEnqueue(SynchWaitParams *waitp);
+static void CondVarEnqueue(SynchWaitParams* waitp);
// Enqueue thread "waitp->thread" on a waiter queue.
// Called with mutex spinlock held if head != nullptr
@@ -925,8 +903,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp);
// returned. This mechanism is used by CondVar to queue a thread on the
// condition variable queue instead of the mutex queue in implementing Wait().
// In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
- SynchWaitParams *waitp, intptr_t mu, int flags) {
+static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
+ intptr_t mu, int flags) {
// If we have been given a cv_word, call CondVarEnqueue() and return
// the previous head of the Mutex waiter queue.
if (waitp->cv_word != nullptr) {
@@ -934,42 +912,43 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
return head;
}
- PerThreadSynch *s = waitp->thread;
+ PerThreadSynch* s = waitp->thread;
Y_ABSL_RAW_CHECK(
s->waitp == nullptr || // normal case
s->waitp == waitp || // Fer()---transfer from condition variable
s->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
s->waitp = waitp;
- s->skip = nullptr; // maintain skip invariant (see above)
- s->may_skip = true; // always true on entering queue
- s->wake = false; // not being woken
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
s->cond_waiter = ((flags & kMuIsCond) != 0);
+#ifdef Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ Y_ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+ }
+ }
+#endif
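
The hunk above hoists the priority refresh out of the non-empty-queue branch so it also runs for a first waiter. The pattern itself is a time-bounded cache: pthread_getschedparam() is comparatively expensive (the comment puts it at 5% of block/wakeup cost versus 0.5% for the cycle clock), so the result is reused for roughly one second of cycles. A standalone sketch of the idiom with std::chrono standing in for CycleClock; Waiter and MaybeRefreshPriority are hypothetical names:

#include <pthread.h>
#include <sched.h>
#include <chrono>

struct Waiter {
  int priority = 0;
  std::chrono::steady_clock::time_point next_priority_read{};
};

// Refresh w->priority at most once per second; between refreshes, reuse
// the cached value, mirroring Enqueue()'s cycle-clock TTL.
void MaybeRefreshPriority(Waiter* w) {
  auto now = std::chrono::steady_clock::now();
  if (now < w->next_priority_read) return;  // cache still fresh
  int policy;
  sched_param param;
  if (pthread_getschedparam(pthread_self(), &policy, &param) == 0) {
    w->priority = param.sched_priority;
    w->next_priority_read = now + std::chrono::seconds(1);
  }
}
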
if (head == nullptr) { // s is the only waiter
s->next = s; // it's the only entry in the cycle
s->readers = mu; // reader count is from mu word
s->maybe_unlocking = false; // no one is searching an empty list
head = s; // s is new head
} else {
- PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
+ PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element
#ifdef Y_ABSL_HAVE_PTHREAD_GETSCHEDPARAM
- int64_t now_cycles = base_internal::CycleClock::Now();
- if (s->next_priority_read_cycles < now_cycles) {
- // Every so often, update our idea of the thread's priority.
- // pthread_getschedparam() is 5% of the block/wakeup time;
- // base_internal::CycleClock::Now() is 0.5%.
- int policy;
- struct sched_param param;
- const int err = pthread_getschedparam(pthread_self(), &policy, &param);
- if (err != 0) {
- Y_ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
- } else {
- s->priority = param.sched_priority;
- s->next_priority_read_cycles =
- now_cycles +
- static_cast<int64_t>(base_internal::CycleClock::Frequency());
- }
- }
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
@@ -979,20 +958,20 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// Within a skip chain, all waiters have the same priority, so we can
// skip forward through the chains until we find one with a lower
// priority than the waiter to be enqueued.
- PerThreadSynch *advance_to = head; // next value of enqueue_after
+ PerThreadSynch* advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
// (side-effect: optimizes skip chain)
advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
- // termination guaranteed because s->priority > head->priority
- // and head is the end of a skip chain
+ // termination guaranteed because s->priority > head->priority
+ // and head is the end of a skip chain
} else if (waitp->how == kExclusive &&
Condition::GuaranteedEqual(waitp->cond, nullptr)) {
// An unlocker could be scanning the queue, but we know it will recheck
// the queue front for writers that have no condition, which is what s
// is, so an insert at front is safe.
- enqueue_after = head; // add after head, at front
+ enqueue_after = head; // add after head, at front
}
}
#endif
@@ -1017,12 +996,12 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
enqueue_after->skip = enqueue_after->next;
}
if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
- s->skip = s->next; // s may skip to its successor
+ s->skip = s->next; // s may skip to its successor
}
- } else { // enqueue not done any other way, so
- // we're inserting s at the back
+ } else { // enqueue not done any other way, so
+ // we're inserting s at the back
// s will become new head; copy data from head into it
- s->next = head->next; // add s after head
+ s->next = head->next; // add s after head
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
@@ -1041,17 +1020,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// whose last element is head. The new head element is returned, or null
// if the list is made empty.
// Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
- PerThreadSynch *w = pw->next;
- pw->next = w->next; // snip w out of list
- if (head == w) { // we removed the head
+static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
+ PerThreadSynch* w = pw->next;
+ pw->next = w->next; // snip w out of list
+ if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
} else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
nullptr) { // either skip to its successor's skip target
pw->skip = pw->next->skip;
- } else { // or to pw's successor
+ } else { // or to pw's successor
pw->skip = pw->next;
}
}
@@ -1064,27 +1043,27 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
// singly-linked list wake_list in the order found. Assumes that
// there is only one such element if the element has how == kExclusive.
// Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
- PerThreadSynch *pw,
- PerThreadSynch **wake_tail) {
- PerThreadSynch *orig_h = head;
- PerThreadSynch *w = pw->next;
+static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
+ PerThreadSynch* pw,
+ PerThreadSynch** wake_tail) {
+ PerThreadSynch* orig_h = head;
+ PerThreadSynch* w = pw->next;
bool skipped = false;
do {
- if (w->wake) { // remove this element
+ if (w->wake) { // remove this element
Y_ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
// we're removing pw's successor so either pw->skip is zero or we should
// already have removed pw since if pw->skip!=null, pw has the same
// condition as w.
head = Dequeue(head, pw);
- w->next = *wake_tail; // keep list terminated
- *wake_tail = w; // add w to wake_list;
- wake_tail = &w->next; // next addition to end
+ w->next = *wake_tail; // keep list terminated
+ *wake_tail = w; // add w to wake_list;
+ wake_tail = &w->next; // next addition to end
if (w->waitp->how == kExclusive) { // wake at most 1 writer
break;
}
- } else { // not waking this one; skip
- pw = Skip(w); // skip as much as possible
+ } else { // not waking this one; skip
+ pw = Skip(w); // skip as much as possible
skipped = true;
}
w = pw->next;
@@ -1102,7 +1081,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
+void Mutex::TryRemove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
@@ -1110,16 +1089,16 @@ void Mutex::TryRemove(PerThreadSynch *s) {
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch* h = GetPerThreadSynch(v);
if (h != nullptr) {
- PerThreadSynch *pw = h; // pw is w's predecessor
- PerThreadSynch *w;
+ PerThreadSynch* pw = h; // pw is w's predecessor
+ PerThreadSynch* w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
// If the current element isn't equivalent to the waiter to be
// removed, we can skip the entire chain.
if (!MuEquivalentWaiter(s, w)) {
- pw = Skip(w); // so skip all that won't match
+ pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
// because they are in a different equivalence class.
@@ -1131,7 +1110,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// process the first thread again.
} while ((w = pw->next) != s && pw != h);
}
- if (w == s) { // found thread; remove it
+ if (w == s) { // found thread; remove it
// pw->skip may be non-zero here; the loop above ensured that
// no ancestor of s can skip to s, so removal is safe anyway.
h = Dequeue(h, pw);
@@ -1140,16 +1119,15 @@ void Mutex::TryRemove(PerThreadSynch *s) {
}
}
intptr_t nv;
- do { // release spinlock and lock
+ do { // release spinlock and lock
v = mu_.load(std::memory_order_relaxed);
nv = v & (kMuDesig | kMuEvent);
if (h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(h);
- h->readers = 0; // we hold writer lock
+ h->readers = 0; // we hold writer lock
h->maybe_unlocking = false; // finished unlocking
}
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
}
}
@@ -1159,7 +1137,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// if the wait extends past the absolute time specified, even if "s" is still
// on the mutex queue. In this case, remove "s" from the queue and return
// true, otherwise return false.
-void Mutex::Block(PerThreadSynch *s) {
+void Mutex::Block(PerThreadSynch* s) {
while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
// After a timeout, we go into a spin loop until we remove ourselves
@@ -1178,7 +1156,7 @@ void Mutex::Block(PerThreadSynch *s) {
// is not on the queue.
this->TryRemove(s);
}
- s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
s->waitp->cond = nullptr; // condition no longer relevant for wakeups
}
}
@@ -1188,8 +1166,8 @@ void Mutex::Block(PerThreadSynch *s) {
}
// Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
- PerThreadSynch *next = w->next;
+PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
+ PerThreadSynch* next = w->next;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
IncrementSynchSem(this, w);
@@ -1197,7 +1175,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
return next;
}
-static GraphId GetGraphIdLocked(Mutex *mu)
+static GraphId GetGraphIdLocked(Mutex* mu)
Y_ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
if (!deadlock_graph) { // (re)create the deadlock graph.
deadlock_graph =
@@ -1207,7 +1185,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
return deadlock_graph->GetId(mu);
}
-static GraphId GetGraphId(Mutex *mu) Y_ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+static GraphId GetGraphId(Mutex* mu) Y_ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
deadlock_graph_mu.Lock();
GraphId id = GetGraphIdLocked(mu);
deadlock_graph_mu.Unlock();
@@ -1217,7 +1195,7 @@ static GraphId GetGraphId(Mutex *mu) Y_ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
// Record a lock acquisition. This is used in debug mode for deadlock
// detection. The held_locks pointer points to the relevant data
// structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
@@ -1241,7 +1219,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
// It does not process the event if it is not needed when deadlock detection is
// disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
@@ -1256,11 +1234,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
i++;
}
if (i == n) { // mu missing means releasing unheld lock
- SynchEvent *mu_events = GetSynchEvent(mu);
+ SynchEvent* mu_events = GetSynchEvent(mu);
Y_ABSL_RAW_LOG(FATAL,
"thread releasing lock it does not hold: %p %s; "
,
- static_cast<void *>(mu),
+ static_cast<void*>(mu),
mu_events == nullptr ? "" : mu_events->name);
}
}
@@ -1277,7 +1255,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
}
// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
+static inline void DebugOnlyLockEnter(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1287,7 +1265,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) {
}
// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1297,7 +1275,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
}
// Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
+static inline void DebugOnlyLockLeave(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1306,9 +1284,9 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
}
}
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
+static char* StackString(void** pcs, int n, char* buf, int maxlen,
bool symbolize) {
- static const int kSymLen = 200;
+ static constexpr int kSymLen = 200;
char sym[kSymLen];
int len = 0;
for (int i = 0; i != n; i++) {
@@ -1316,7 +1294,7 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
size_t count = static_cast<size_t>(maxlen - len);
if (symbolize) {
- if (!symbolizer(pcs[i], sym, kSymLen)) {
+ if (!y_absl::Symbolize(pcs[i], sym, kSymLen)) {
sym[0] = '\0';
}
snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
@@ -1329,15 +1307,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
}
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
- void *pcs[40];
+static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
+ void* pcs[40];
return StackString(pcs, y_absl::GetStackTrace(pcs, Y_ABSL_ARRAYSIZE(pcs), 2), buf,
maxlen, symbolize);
}
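
CurrentStackString() captures up to 40 PCs and renders them through StackString(); the only change in this hunk besides pointer style is that symbolization now calls y_absl::Symbolize directly instead of going through the removed registerable hook. A minimal sketch of the capture-and-format loop using the same two y_absl entry points; FormatCurrentStack is a hypothetical name:

#include <cstdio>

#include "y_absl/debugging/stacktrace.h"
#include "y_absl/debugging/symbolize.h"

// Append "@ addr symbol" lines for the current call stack into buf.
void FormatCurrentStack(char* buf, size_t buflen) {
  void* pcs[40];
  // skip_count=1 drops FormatCurrentStack's own frame.
  int n = y_absl::GetStackTrace(pcs, 40, /*skip_count=*/1);
  size_t len = 0;
  for (int i = 0; i < n && len + 1 < buflen; i++) {
    char sym[200];
    if (!y_absl::Symbolize(pcs[i], sym, sizeof(sym))) sym[0] = '\0';
    int b = snprintf(buf + len, buflen - len, "@ %p %s\n", pcs[i], sym);
    if (b < 0 || static_cast<size_t>(b) >= buflen - len) break;  // truncated
    len += static_cast<size_t>(b);
  }
}
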
namespace {
-enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
- // a path this long would be remarkable
+enum {
+ kMaxDeadlockPathLen = 10
+}; // maximum length of a deadlock cycle;
+ // a path this long would be remarkable
// Buffers required to report a deadlock.
// We do not allocate them on stack to avoid large stack frame.
struct DeadlockReportBuffers {
@@ -1347,11 +1327,11 @@ struct DeadlockReportBuffers {
struct ScopedDeadlockReportBuffers {
ScopedDeadlockReportBuffers() {
- b = reinterpret_cast<DeadlockReportBuffers *>(
+ b = reinterpret_cast<DeadlockReportBuffers*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
}
~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
- DeadlockReportBuffers *b;
+ DeadlockReportBuffers* b;
};
// Helper to pass to GraphCycles::UpdateStackTrace.
@@ -1362,13 +1342,13 @@ int GetStack(void** stack, int max_depth) {
// Called in debug mode when a thread is about to acquire a lock in a way that
// may block.
-static GraphId DeadlockCheck(Mutex *mu) {
+static GraphId DeadlockCheck(Mutex* mu) {
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kIgnore) {
return InvalidGraphId();
}
- SynchLocksHeld *all_locks = Synch_GetAllLocks();
+ SynchLocksHeld* all_locks = Synch_GetAllLocks();
y_absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
const GraphId mu_id = GetGraphIdLocked(mu);
@@ -1390,8 +1370,8 @@ static GraphId DeadlockCheck(Mutex *mu) {
// For each other mutex already held by this thread:
for (int i = 0; i != all_locks->n; i++) {
const GraphId other_node_id = all_locks->locks[i].id;
- const Mutex *other =
- static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+ const Mutex* other =
+ static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
if (other == nullptr) {
// Ignore stale lock
continue;
@@ -1400,7 +1380,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Add the acquired-before edge to the graph.
if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
ScopedDeadlockReportBuffers scoped_buffers;
- DeadlockReportBuffers *b = scoped_buffers.b;
+ DeadlockReportBuffers* b = scoped_buffers.b;
static int number_of_reported_deadlocks = 0;
number_of_reported_deadlocks++;
// Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
@@ -1411,37 +1391,40 @@ static GraphId DeadlockCheck(Mutex *mu) {
for (int j = 0; j != all_locks->n; j++) {
void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
if (pr != nullptr) {
- snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
len += strlen(&b->buf[len]);
}
}
Y_ABSL_RAW_LOG(ERROR,
"Acquiring y_absl::Mutex %p while holding %s; a cycle in the "
"historical lock ordering graph has been observed",
- static_cast<void *>(mu), b->buf);
+ static_cast<void*>(mu), b->buf);
Y_ABSL_RAW_LOG(ERROR, "Cycle: ");
- int path_len = deadlock_graph->FindPath(
- mu_id, other_node_id, Y_ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
+ int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
+ Y_ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len && j != Y_ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j];
- Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue;
void** stack;
int depth = deadlock_graph->GetStackTrace(id, &stack);
snprintf(b->buf, sizeof(b->buf),
- "mutex@%p stack: ", static_cast<void *>(path_mu));
+ "mutex@%p stack: ", static_cast<void*>(path_mu));
StackString(stack, depth, b->buf + strlen(b->buf),
static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
symbolize);
Y_ABSL_RAW_LOG(ERROR, "%s", b->buf);
}
+ if (path_len > static_cast<int>(Y_ABSL_ARRAYSIZE(b->path))) {
+ Y_ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+ }
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kAbort) {
deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
Y_ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
return mu_id;
}
- break; // report at most one potential deadlock per acquisition
+ break; // report at most one potential deadlock per acquisition
}
}
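
DeadlockCheck() above is the heart of the detector: on each blocking acquisition it inserts an acquired-before edge from every lock the thread already holds to the one being acquired, and a rejected insertion means the edge would close a cycle in the historical lock-order graph, i.e. some interleaving could deadlock. A toy illustration of that invariant, with a hypothetical LockGraph in place of GraphCycles:

#include <map>
#include <set>

// Hypothetical stand-in for GraphCycles: InsertEdge() refuses any edge
// that would create a cycle, which is exactly the deadlock condition.
class LockGraph {
 public:
  bool InsertEdge(int from, int to) {
    if (Reaches(to, from)) return false;  // would close a cycle
    edges_[from].insert(to);
    return true;
  }
 private:
  bool Reaches(int a, int b) {  // DFS: is b reachable from a?
    if (a == b) return true;
    for (int next : edges_[a])
      if (Reaches(next, b)) return true;
    return false;  // terminates: accepted edges always form a DAG
  }
  std::map<int, std::set<int>> edges_;
};

// Thread 1 takes A then B; thread 2 takes B then A.  The second program
// order produces the back edge and is reported.
bool DemoDeadlock() {
  LockGraph g;
  bool ab = g.InsertEdge(/*A=*/1, /*B=*/2);  // true: A acquired before B
  bool ba = g.InsertEdge(/*B=*/2, /*A=*/1);  // false: cycle A->B->A
  return ab && !ba;
}
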
@@ -1450,7 +1433,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Invoke DeadlockCheck() iff we're in debug mode and
// deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
return DeadlockCheck(mu);
@@ -1477,13 +1460,13 @@ void Mutex::AssertNotHeld() const {
(mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
- GraphId id = GetGraphId(const_cast<Mutex *>(this));
- SynchLocksHeld *locks = Synch_GetAllLocks();
+ GraphId id = GetGraphId(const_cast<Mutex*>(this));
+ SynchLocksHeld* locks = Synch_GetAllLocks();
for (int i = 0; i != locks->n; i++) {
if (locks->locks[i].id == id) {
- SynchEvent *mu_events = GetSynchEvent(this);
+ SynchEvent* mu_events = GetSynchEvent(this);
Y_ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
- static_cast<const void *>(this),
+ static_cast<const void*>(this),
(mu_events == nullptr ? "" : mu_events->name));
}
}
@@ -1496,8 +1479,8 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
- if ((v & (kMuReader|kMuEvent)) != 0) {
- return false; // a reader or tracing -> give up
+ if ((v & (kMuReader | kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
@@ -1514,8 +1497,7 @@ void Mutex::Lock() {
intptr_t v = mu_.load(std::memory_order_relaxed);
// try fast acquire, then spin loop
if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
- !mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
+ !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed)) {
// try spin acquire, then slow loop
if (!TryAcquireWithSpinning(&this->mu_)) {
@@ -1541,7 +1523,7 @@ void Mutex::ReaderLock() {
Y_ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
-void Mutex::LockWhen(const Condition &cond) {
+void Mutex::LockWhen(const Condition& cond) {
Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kExclusive, &cond, 0);
@@ -1549,21 +1531,26 @@ void Mutex::LockWhen(const Condition &cond) {
Y_ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
-bool Mutex::LockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::LockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout) {
+ Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ Y_ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
}
-bool Mutex::LockWhenWithDeadline(const Condition &cond, y_absl::Time deadline) {
+bool Mutex::LockWhenWithDeadline(const Condition& cond, y_absl::Time deadline) {
Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
- bool res = LockSlowWithDeadline(kExclusive, &cond,
- KernelTimeout(deadline), 0);
+ bool res =
+ LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
DebugOnlyLockEnter(this, id);
Y_ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
return res;
}
-void Mutex::ReaderLockWhen(const Condition &cond) {
+void Mutex::ReaderLockWhen(const Condition& cond) {
Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kShared, &cond, 0);
@@ -1571,12 +1558,17 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
Y_ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
y_absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ Y_ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
}
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
y_absl::Time deadline) {
Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1586,23 +1578,34 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
return res;
}
-void Mutex::Await(const Condition &cond) {
- if (cond.Eval()) { // condition already true; nothing to do
+void Mutex::Await(const Condition& cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
}
- } else { // normal case
+ } else { // normal case
Y_ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
"condition untrue on return from Await");
}
}
-bool Mutex::AwaitWithTimeout(const Condition &cond, y_absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::AwaitWithTimeout(const Condition& cond, y_absl::Duration timeout) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ Y_ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
}
-bool Mutex::AwaitWithDeadline(const Condition &cond, y_absl::Time deadline) {
- if (cond.Eval()) { // condition already true; nothing to do
+bool Mutex::AwaitWithDeadline(const Condition& cond, y_absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
}
@@ -1616,14 +1619,14 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, y_absl::Time deadline) {
return res;
}
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
this->AssertReaderHeld();
MuHow how =
(mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
Y_ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
- SynchWaitParams waitp(
- how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
+ SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
+ Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
int flags = kMuHasBlocked;
if (!Condition::GuaranteedEqual(&cond, nullptr)) {
flags |= kMuIsCond;
@@ -1643,14 +1646,13 @@ bool Mutex::TryLock() {
Y_ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
- mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
+ mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed)) {
DebugOnlyLockEnter(this);
Y_ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
return true;
}
- if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kMuEvent) != 0) { // we're recording events
if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
mu_.compare_exchange_strong(
v, (kExclusive->fast_or | v) + kExclusive->fast_add,
@@ -1676,7 +1678,7 @@ bool Mutex::ReaderTryLock() {
// changing (typically because the reader count changes) under the CAS. We
// limit the number of attempts to avoid having to think about livelock.
int loop_limit = 5;
- while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+ while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
std::memory_order_acquire,
std::memory_order_relaxed)) {
@@ -1688,7 +1690,7 @@ bool Mutex::ReaderTryLock() {
loop_limit--;
v = mu_.load(std::memory_order_relaxed);
}
- if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kMuEvent) != 0) { // we're recording events
loop_limit = 5;
while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
@@ -1727,7 +1729,7 @@ void Mutex::Unlock() {
// should_try_cas is whether we'll try a compare-and-swap immediately.
// NOTE: optimized out when kDebugMode is false.
bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
- (v & (kMuWait | kMuDesig)) != kMuWait);
+ (v & (kMuWait | kMuDesig)) != kMuWait);
// But, we can use an alternate computation of it, that compilers
// currently don't find on their own. When that changes, this function
// can be simplified.
@@ -1744,10 +1746,9 @@ void Mutex::Unlock() {
static_cast<long long>(v), static_cast<long long>(x),
static_cast<long long>(y));
}
- if (x < y &&
- mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
- std::memory_order_release,
- std::memory_order_relaxed)) {
+ if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
// fast writer release (writer with no waiters or with designated waker)
} else {
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
@@ -1757,7 +1758,7 @@ void Mutex::Unlock() {
// Requires v to represent a reader-locked state.
static bool ExactlyOneReader(intptr_t v) {
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
assert((v & kMuHigh) != 0);
// The more straightforward "(v & kMuHigh) == kMuOne" also works, but
// on some architectures the following generates slightly smaller code.
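
The comment above alludes to the masked form used in place of the straightforward test: because the reader count occupies kMuHigh in units of kMuOne and is known non-zero here, "count == 1" is equivalent to "no count bit other than kMuOne's is set". A sketch of that equivalence, assuming the masked form is (v & (kMuHigh ^ kMuOne)) == 0 and kMuOne is the lowest kMuHigh bit (0x0100 for the 0x00ff low mask):

#include <cstdint>

constexpr intptr_t kMuLow = 0x00ffL;
constexpr intptr_t kMuHigh = ~kMuLow;
constexpr intptr_t kMuOne = 0x0100L;  // a count of one reader

constexpr bool ExactlyOneStraightforward(intptr_t v) {
  return (v & kMuHigh) == kMuOne;
}
// Same answer whenever at least one reader is present (v & kMuHigh != 0),
// which the assert just above guarantees.
constexpr bool ExactlyOneMasked(intptr_t v) {
  return (v & (kMuHigh ^ kMuOne)) == 0;
}

static_assert(ExactlyOneStraightforward(kMuOne) == ExactlyOneMasked(kMuOne),
              "one reader");
static_assert(ExactlyOneStraightforward(2 * kMuOne) ==
                  ExactlyOneMasked(2 * kMuOne),
              "two readers");
static_assert(ExactlyOneStraightforward(3 * kMuOne) ==
                  ExactlyOneMasked(3 * kMuOne),
              "three readers");
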
@@ -1770,12 +1771,11 @@ void Mutex::ReaderUnlock() {
Y_ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
- if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
+ if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
// fast reader release (reader with no waiters)
- intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
Y_ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
return;
@@ -1814,7 +1814,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) {
}
// Internal version of LockWhen(). See LockSlowWithDeadline()
-Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
int flags) {
Y_ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
@@ -1822,7 +1822,7 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
}
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock) {
// Delicate annotation dance.
@@ -1872,7 +1872,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// tsan). As a result there is no tsan-visible synchronization between the
// addition and this thread. So if we were to enable race detection here,
// it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
// Memory accesses are already ignored inside of lock/unlock operations,
// but synchronization operations are also ignored. When we evaluate the
// predicate we must ignore only memory accesses but not synchronization,
@@ -1897,7 +1897,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// obstruct this call
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
// Await, LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
KernelTimeout t, int flags) {
intptr_t v = mu_.load(std::memory_order_relaxed);
bool unlock = false;
@@ -1914,9 +1914,9 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
}
unlock = true;
}
- SynchWaitParams waitp(
- how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
+ SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
+ Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
if (!Condition::GuaranteedEqual(cond, nullptr)) {
flags |= kMuIsCond;
}
@@ -1957,20 +1957,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
if (Y_ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
"%s: Mutex corrupt: both reader and writer lock held: %p",
- label, reinterpret_cast<void *>(v));
+ label, reinterpret_cast<void*>(v));
RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
- "%s: Mutex corrupt: waiting writer with no waiters: %p",
- label, reinterpret_cast<void *>(v));
+ "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
+ reinterpret_cast<void*>(v));
assert(false);
}
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+ PostSynchEvent(
+ this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
}
Y_ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -1995,11 +1995,11 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
flags |= kMuHasBlocked;
c = 0;
}
- } else { // need to access waiter list
+ } else { // need to access waiter list
bool dowait = false;
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+ PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
intptr_t nv =
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
kMuWait;
@@ -2011,7 +2011,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
v, reinterpret_cast<intptr_t>(new_h) | nv,
std::memory_order_release, std::memory_order_relaxed)) {
dowait = true;
- } else { // attempted Enqueue() failed
+ } else { // attempted Enqueue() failed
// zero out the waitp field set by Enqueue()
waitp->thread->waitp = nullptr;
}
@@ -2024,9 +2024,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- h->readers += kMuOne; // inc reader count in waiter
- do { // release spinlock
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ h->readers += kMuOne; // inc reader count in waiter
+ do { // release spinlock
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
std::memory_order_release,
@@ -2036,7 +2036,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->how == kShared)) {
break; // we timed out, or condition true, so return
}
- this->UnlockSlow(waitp); // got lock but condition false
+ this->UnlockSlow(waitp); // got lock but condition false
this->Block(waitp->thread);
flags |= kMuHasBlocked;
c = 0;
@@ -2047,18 +2047,19 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
intptr_t wr_wait = 0;
Y_ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
- wr_wait = kMuWrWait; // give priority to a waiting writer
+ wr_wait = kMuWrWait; // give priority to a waiting writer
}
- do { // release spinlock
+ do { // release spinlock
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(
- v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
- reinterpret_cast<intptr_t>(new_h),
+ v,
+ (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+ reinterpret_cast<intptr_t>(new_h),
std::memory_order_release, std::memory_order_relaxed));
dowait = true;
}
@@ -2078,9 +2079,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
- SYNCH_EV_READERLOCK_RETURNING);
+ PostSynchEvent(this, waitp->how == kExclusive
+ ? SYNCH_EV_LOCK_RETURNING
+ : SYNCH_EV_READERLOCK_RETURNING);
}
}
@@ -2089,28 +2090,28 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
-Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+ PostSynchEvent(
+ this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
}
int c = 0;
// the waiter under consideration to wake, or zero
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* w = nullptr;
// the predecessor to w or zero
- PerThreadSynch *pw = nullptr;
+ PerThreadSynch* pw = nullptr;
// head of the list searched previously, or zero
- PerThreadSynch *old_h = nullptr;
+ PerThreadSynch* old_h = nullptr;
// a condition that's known to be false.
- const Condition *known_false = nullptr;
- PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
- intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
- // later writer could have acquired the lock
- // (starvation avoidance)
+ const Condition* known_false = nullptr;
+ PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
+ // later writer could have acquired the lock
+ // (starvation avoidance)
Y_ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
@@ -2130,8 +2131,7 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
// fast reader release (reader with no waiters)
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
return;
}
@@ -2139,16 +2139,16 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
mu_.compare_exchange_strong(v, v | kMuSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- if ((v & kMuWait) == 0) { // no one to wake
+ if ((v & kMuWait) == 0) { // no one to wake
intptr_t nv;
bool do_enqueue = true; // always Enqueue() the first time
Y_ABSL_RAW_CHECK(waitp != nullptr,
"UnlockSlow is confused"); // about to sleep
- do { // must loop to release spinlock as reader count may change
+ do { // must loop to release spinlock as reader count may change
v = mu_.load(std::memory_order_relaxed);
// decrement reader count if there are readers
- intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
- PerThreadSynch *new_h = nullptr;
+ intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
+ PerThreadSynch* new_h = nullptr;
if (do_enqueue) {
// If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
// we must not retry here. The initial attempt will always have
@@ -2172,21 +2172,20 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// release spinlock & our lock; retry if reader-count changed
// (writer count cannot change since we hold lock)
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
break;
}
// There are waiters.
// Set h to the head of the circular waiter list.
- PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch* h = GetPerThreadSynch(v);
if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
// a reader but not the last
- h->readers -= kMuOne; // release our lock
- intptr_t nv = v; // normally just release spinlock
+ h->readers -= kMuOne; // release our lock
+ intptr_t nv = v; // normally just release spinlock
if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
Y_ABSL_RAW_CHECK(new_h != nullptr,
"waiters disappeared during Enqueue()!");
nv &= kMuLow;
@@ -2204,8 +2203,8 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// The lock is becoming free, and there's a waiter
if (old_h != nullptr &&
- !old_h->may_skip) { // we used old_h as a terminator
- old_h->may_skip = true; // allow old_h to skip once more
+ !old_h->may_skip) { // we used old_h as a terminator
+ old_h->may_skip = true; // allow old_h to skip once more
Y_ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
@@ -2214,7 +2213,7 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (h->next->waitp->how == kExclusive &&
Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
// easy case: writer with no condition; no need to search
- pw = h; // wake w, the successor of h (=pw)
+ pw = h; // wake w, the successor of h (=pw)
w = h->next;
w->wake = true;
// We are waking up a writer. This writer may be racing against
@@ -2237,13 +2236,13 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// waiter has a condition or is a reader. We avoid searching over
// waiters we've searched on previous iterations by starting at
// old_h if it's set. If old_h==h, there's no one to wake up at all.
- if (old_h == h) { // we've searched before, and nothing's new
- // so there's no one to wake.
- intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+ if (old_h == h) { // we've searched before, and nothing's new
+ // so there's no one to wake.
+ intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
- if (waitp != nullptr) { // we must queue ourselves and sleep
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ h->maybe_unlocking = false; // finished unlocking
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
nv &= kMuLow;
if (new_h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
@@ -2257,12 +2256,12 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// set up to walk the list
- PerThreadSynch *w_walk; // current waiter during list walk
- PerThreadSynch *pw_walk; // previous waiter during list walk
+ PerThreadSynch* w_walk; // current waiter during list walk
+ PerThreadSynch* pw_walk; // previous waiter during list walk
if (old_h != nullptr) { // we've searched up to old_h before
pw_walk = old_h;
w_walk = old_h->next;
- } else { // no prior search, start at beginning
+ } else { // no prior search, start at beginning
pw_walk =
nullptr; // h->next's predecessor may change; don't record it
w_walk = h->next;
@@ -2288,7 +2287,7 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// to walk the path from w_walk to h inclusive. (TryRemove() can remove
// a waiter anywhere, but it acquires both the spinlock and the Mutex)
- old_h = h; // remember we searched to here
+ old_h = h; // remember we searched to here
// Walk the path up to and including h looking for waiters we can wake.
while (pw_walk != h) {
@@ -2300,24 +2299,24 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// is in fact true
EvalConditionIgnored(this, w_walk->waitp->cond))) {
if (w == nullptr) {
- w_walk->wake = true; // can wake this waiter
+ w_walk->wake = true; // can wake this waiter
w = w_walk;
pw = pw_walk;
if (w_walk->waitp->how == kExclusive) {
wr_wait = kMuWrWait;
- break; // bail if waking this writer
+ break; // bail if waking this writer
}
} else if (w_walk->waitp->how == kShared) { // wake if a reader
w_walk->wake = true;
- } else { // writer with true condition
+ } else { // writer with true condition
wr_wait = kMuWrWait;
}
- } else { // can't wake; condition false
+ } else { // can't wake; condition false
known_false = w_walk->waitp->cond; // remember last false condition
}
- if (w_walk->wake) { // we're waking reader w_walk
- pw_walk = w_walk; // don't skip similar waiters
- } else { // not waking; skip as much as possible
+ if (w_walk->wake) { // we're waking reader w_walk
+ pw_walk = w_walk; // don't skip similar waiters
+ } else { // not waking; skip as much as possible
pw_walk = Skip(w_walk);
}
// If pw_walk == h, then load of pw_walk->next can race with
@@ -2344,8 +2343,8 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
h = DequeueAllWakeable(h, pw, &wake_list);
intptr_t nv = (v & kMuEvent) | kMuDesig;
- // assume no waiters left,
- // set kMuDesig for INV1a
+ // assume no waiters left,
+ // set kMuDesig for INV1a
if (waitp != nullptr) { // we must queue ourselves and sleep
h = Enqueue(h, waitp, v, kMuIsCond);
@@ -2358,7 +2357,7 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (h != nullptr) { // there are waiters left
h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
+ h->maybe_unlocking = false; // finished unlocking
nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
}
@@ -2369,12 +2368,12 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// aggressive here; no one can proceed till we do
c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
- } // end of for(;;)-loop
+ } // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
int64_t total_wait_cycles = 0;
int64_t max_wait_cycles = 0;
- int64_t now = base_internal::CycleClock::Now();
+ int64_t now = CycleClock::Now();
do {
// Profile lock contention events only if the waiter was trying to acquire
// the lock, not waiting on a condition variable or Condition.
@@ -2386,7 +2385,7 @@ Y_ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
wake_list->waitp->contention_start_cycles = now;
wake_list->waitp->should_submit_contention_data = true;
}
- wake_list = Wakeup(wake_list); // wake waiters
+ wake_list = Wakeup(wake_list); // wake waiters
} while (wake_list != kPerThreadSynchNull);
if (total_wait_cycles > 0) {
mutex_tracer("slow release", this, total_wait_cycles);
@@ -2414,7 +2413,7 @@ void Mutex::Trans(MuHow how) {
// condition variable. If this mutex is free, we simply wake the thread.
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
+void Mutex::Fer(PerThreadSynch* w) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
Y_ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2439,9 +2438,9 @@ void Mutex::Fer(PerThreadSynch *w) {
IncrementSynchSem(this, w);
return;
} else {
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
Y_ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves
if (mu_.compare_exchange_strong(
@@ -2451,8 +2450,8 @@ void Mutex::Fer(PerThreadSynch *w) {
}
} else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
Y_ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves
do {
@@ -2471,19 +2470,18 @@ void Mutex::Fer(PerThreadSynch *w) {
void Mutex::AssertHeld() const {
if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
- SynchEvent *e = GetSynchEvent(this);
+ SynchEvent* e = GetSynchEvent(this);
Y_ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
- static_cast<const void *>(this),
- (e == nullptr ? "" : e->name));
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
void Mutex::AssertReaderHeld() const {
if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
- SynchEvent *e = GetSynchEvent(this);
- Y_ABSL_RAW_LOG(
- FATAL, "thread should hold at least a read lock on Mutex %p %s",
- static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ SynchEvent* e = GetSynchEvent(this);
+ Y_ABSL_RAW_LOG(FATAL,
+ "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
@@ -2494,13 +2492,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events
static const intptr_t kCvLow = 0x0003L; // low order bits of CV
// Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+enum {
+ kGdbCvSpin = kCvSpin,
+ kGdbCvEvent = kCvEvent,
+ kGdbCvLow = kCvLow,
+};
static_assert(PerThreadSynch::kAlignment > kCvLow,
"PerThreadSynch::kAlignment must be greater than kCvLow");
-void CondVar::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+void CondVar::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
e->log = true;
UnrefSynchEvent(e);
}
@@ -2511,25 +2513,23 @@ CondVar::~CondVar() {
}
}
-
// Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
+void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
- PerThreadSynch *w = h;
+ PerThreadSynch* w = h;
while (w->next != s && w->next != h) { // search for thread
w = w->next;
}
- if (w->next == s) { // found thread; remove it
+ if (w->next == s) { // found thread; remove it
w->next = s->next;
if (h == s) {
h = (w == s) ? nullptr : w;
@@ -2538,7 +2538,7 @@ void CondVar::Remove(PerThreadSynch *s) {
s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
}
}
- // release spinlock
+ // release spinlock
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
return;
@@ -2561,14 +2561,14 @@ void CondVar::Remove(PerThreadSynch *s) {
// variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the
// mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
+static void CondVarEnqueue(SynchWaitParams* waitp) {
// This thread might be transferred to the Mutex queue by Fer() when
// we are woken. To make sure that is what happens, Enqueue() doesn't
// call CondVarEnqueue() again but instead uses its normal code. We
// must do this before we queue ourselves so that cv_word will be null
// when seen by the dequeuer, who may wish immediately to requeue
// this thread on another queue.
- std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ std::atomic<intptr_t>* cv_word = waitp->cv_word;
waitp->cv_word = nullptr;
intptr_t v = cv_word->load(std::memory_order_relaxed);
@@ -2581,8 +2581,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
v = cv_word->load(std::memory_order_relaxed);
}
Y_ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
- waitp->thread->waitp = waitp; // prepare ourselves for waiting
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h == nullptr) { // add this thread to waiter list
waitp->thread->next = waitp->thread;
} else {
@@ -2595,8 +2595,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
std::memory_order_release);
}
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
- bool rc = false; // return value; true iff we timed-out
+bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
+ bool rc = false;  // return value; true iff we timed out
intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
@@ -2663,27 +2663,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
return rc;
}
-bool CondVar::WaitWithTimeout(Mutex *mu, y_absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+bool CondVar::WaitWithTimeout(Mutex* mu, y_absl::Duration timeout) {
+ return WaitCommon(mu, KernelTimeout(timeout));
}
-bool CondVar::WaitWithDeadline(Mutex *mu, y_absl::Time deadline) {
+bool CondVar::WaitWithDeadline(Mutex* mu, y_absl::Time deadline) {
return WaitCommon(mu, KernelTimeout(deadline));
}
-void CondVar::Wait(Mutex *mu) {
- WaitCommon(mu, KernelTimeout::Never());
-}
+void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
// Wake thread w
// If it was a timed wait, w will be waiting on w->cv
// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
+void CondVar::Wakeup(PerThreadSynch* w) {
if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
// The waiting thread only needs to observe "w->state == kAvailable" to be
// released; we must cache "cvmu" before clearing "next".
- Mutex *mu = w->waitp->cvmu;
+ Mutex* mu = w->waitp->cvmu;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
Mutex::IncrementSynchSem(mu, w);
@@ -2700,11 +2698,10 @@ void CondVar::Signal() {
for (v = cv_.load(std::memory_order_relaxed); v != 0;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
+ PerThreadSynch* w = nullptr;
if (h != nullptr) { // remove first waiter
w = h->next;
if (w == h) {
@@ -2713,11 +2710,11 @@ void CondVar::Signal() {
h->next = w->next;
}
}
- // release spinlock
+ // release spinlock
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
if (w != nullptr) {
- CondVar::Wakeup(w); // wake waiter, if there was one
+ CondVar::Wakeup(w); // wake waiter, if there was one
cond_var_tracer("Signal wakeup", this);
}
if ((v & kCvEvent) != 0) {
@@ -2732,7 +2729,7 @@ void CondVar::Signal() {
Y_ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
-void CondVar::SignalAll () {
+void CondVar::SignalAll() {
Y_ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2746,11 +2743,11 @@ void CondVar::SignalAll () {
if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
- PerThreadSynch *w;
- PerThreadSynch *n = h->next;
- do { // for every thread, wake it up
+ PerThreadSynch* w;
+ PerThreadSynch* n = h->next;
+ do { // for every thread, wake it up
w = n;
n = n->next;
CondVar::Wakeup(w);
@@ -2778,42 +2775,41 @@ void ReleasableMutexLock::Release() {
}
#ifdef Y_ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
+extern "C" void __tsan_read1(void* addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
#endif
// A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
+static bool Dereference(void* arg) {
// ThreadSanitizer does not instrument this file for memory accesses.
// This function dereferences a user variable that can participate
// in a data race, so we need to manually tell TSan about this memory access.
__tsan_read1(arg);
- return *(static_cast<bool *>(arg));
+ return *(static_cast<bool*>(arg));
}
Y_ABSL_CONST_INIT const Condition Condition::kTrue;
-Condition::Condition(bool (*func)(void *), void *arg)
- : eval_(&CallVoidPtrFunction),
- arg_(arg) {
+Condition::Condition(bool (*func)(void*), void* arg)
+ : eval_(&CallVoidPtrFunction), arg_(arg) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer passed to Condition.");
StoreCallback(func);
}
-bool Condition::CallVoidPtrFunction(const Condition *c) {
- using FunctionPointer = bool (*)(void *);
+bool Condition::CallVoidPtrFunction(const Condition* c) {
+ using FunctionPointer = bool (*)(void*);
FunctionPointer function_pointer;
std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
return (*function_pointer)(c->arg_);
}
-Condition::Condition(const bool *cond)
+Condition::Condition(const bool* cond)
: eval_(CallVoidPtrFunction),
// const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {
- using FunctionPointer = bool (*)(void *);
+ arg_(const_cast<bool*>(cond)) {
+ using FunctionPointer = bool (*)(void*);
const FunctionPointer dereference = Dereference;
StoreCallback(dereference);
}
@@ -2823,7 +2819,7 @@ bool Condition::Eval() const {
return (this->eval_ == nullptr) || (*this->eval_)(this);
}
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
// kTrue logic.
if (a == nullptr || a->eval_ == nullptr) {
return b == nullptr || b->eval_ == nullptr;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
index 7f1d5b0832..40aab749a8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
@@ -92,26 +92,42 @@ struct SynchWaitParams;
//
// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
-// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
// Mutex. During the span of time between the Lock() and Unlock() operations,
-// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// a mutex is said to be *held*. By design, all mutexes support exclusive/write
// locks, as this is the most common way to use a mutex.
//
+// Mutex operations are only allowed under certain conditions; otherwise an
+// operation is "invalid", and disallowed by the API. The conditions concern
+// both the current state of the mutex and the identity of the threads that
+// are performing the operations.
+//
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
//
-// | | Lock() | Unlock() |
-// |----------------+------------+----------|
-// | Free | Exclusive | invalid |
-// | Exclusive | blocks | Free |
+// | | Lock() | Unlock() |
+// |----------------+------------------------+----------|
+// | Free | Exclusive | invalid |
+// | Exclusive | blocks, then exclusive | Free |
+//
+// The full conditions are as follows.
+//
+// * Calls to `Unlock()` require that the mutex be held, and must be made in the
+// same thread that performed the corresponding `Lock()` operation which
+// acquired the mutex; otherwise the call is invalid.
+//
+// * The mutex being non-reentrant (or non-recursive) means that a call to
+// `Lock()` or `TryLock()` must not be made in a thread that already holds the
+// mutex; such a call is invalid.
//
-// Attempts to `Unlock()` must originate from the thread that performed the
-// corresponding `Lock()` operation.
+// * In other words, the state of being "held" has both a temporal component
+// (from `Lock()` until `Unlock()`) and a thread identity component:
+// the mutex is held *by a particular thread*.
//
-// An "invalid" operation is disallowed by the API. The `Mutex` implementation
-// is allowed to do anything on an invalid call, including but not limited to
+// An "invalid" operation has undefined behavior. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including, but not limited to,
// crashing with a useful error message, silently succeeding, or corrupting
-// data structures. In debug mode, the implementation attempts to crash with a
-// useful error message.
+// data structures. In debug mode, the implementation may crash with a useful
+// error message.
//
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
// is, however, approximately fair over long periods, and starvation-free for
@@ -125,8 +141,9 @@ struct SynchWaitParams;
// issues that could potentially result in race conditions and deadlocks.
//
// For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
+// [Thread Safety
+// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
+// documentation.
//
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
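//
// A minimal usage sketch of the contract above (illustrative only; `mu` and
// `counter` are hypothetical names, not part of this API):
//
//   y_absl::Mutex mu;
//   int counter = 0;  // guarded by mu
//
//   void Increment() {
//     mu.Lock();    // acquires the exclusive (write) lock; blocks if held
//     ++counter;
//     mu.Unlock();  // must run on the same thread that performed the Lock()
//   }
//
// Calling Unlock() without holding mu, or calling Lock() twice from the same
// thread (the mutex is non-reentrant), is an invalid operation as described
// above.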
@@ -257,7 +274,7 @@ class Y_ABSL_LOCKABLE Mutex {
// Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
//
// These methods may be used (along with the complementary `Reader*()`
- // methods) to distingish simple exclusive `Mutex` usage (`Lock()`,
+ // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
// etc.) from reader/writer lock usage.
void WriterLock() Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
@@ -307,7 +324,7 @@ class Y_ABSL_LOCKABLE Mutex {
// `true`, `Await()` *may* skip the release/re-acquire step.
//
// `Await()` requires that this thread holds this `Mutex` in some mode.
- void Await(const Condition &cond);
+ void Await(const Condition& cond);
// Mutex::LockWhen()
// Mutex::ReaderLockWhen()
@@ -317,11 +334,11 @@ class Y_ABSL_LOCKABLE Mutex {
// be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
// logically equivalent to `*Lock(); Await();` though they may have different
// performance characteristics.
- void LockWhen(const Condition &cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ void LockWhen(const Condition& cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION();
- void ReaderLockWhen(const Condition &cond) Y_ABSL_SHARED_LOCK_FUNCTION();
+ void ReaderLockWhen(const Condition& cond) Y_ABSL_SHARED_LOCK_FUNCTION();
- void WriterLockWhen(const Condition &cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ void WriterLockWhen(const Condition& cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
this->LockWhen(cond);
}
@@ -346,9 +363,9 @@ class Y_ABSL_LOCKABLE Mutex {
// Negative timeouts are equivalent to a zero timeout.
//
// This method requires that this thread holds this `Mutex` in some mode.
- bool AwaitWithTimeout(const Condition &cond, y_absl::Duration timeout);
+ bool AwaitWithTimeout(const Condition& cond, y_absl::Duration timeout);
- bool AwaitWithDeadline(const Condition &cond, y_absl::Time deadline);
+ bool AwaitWithDeadline(const Condition& cond, y_absl::Time deadline);
// Mutex::LockWhenWithTimeout()
// Mutex::ReaderLockWhenWithTimeout()
@@ -361,11 +378,11 @@ class Y_ABSL_LOCKABLE Mutex {
// `true` on return.
//
// Negative timeouts are equivalent to a zero timeout.
- bool LockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout)
+ bool LockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout)
+ bool ReaderLockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
Y_ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithTimeout(const Condition &cond, y_absl::Duration timeout)
+ bool WriterLockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithTimeout(cond, timeout);
}
@@ -381,11 +398,11 @@ class Y_ABSL_LOCKABLE Mutex {
// on return.
//
// Deadlines in the past are equivalent to an immediate deadline.
- bool LockWhenWithDeadline(const Condition &cond, y_absl::Time deadline)
+ bool LockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithDeadline(const Condition &cond, y_absl::Time deadline)
+ bool ReaderLockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
Y_ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithDeadline(const Condition &cond, y_absl::Time deadline)
+ bool WriterLockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithDeadline(cond, deadline);
}
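//
// A minimal sketch of a timed conditional acquire, assuming a hypothetical
// `queue` guarded by `mu`. The return value reports whether the condition
// held on return, not whether the deadline fired:
//
//   static bool HasWork(std::deque<int>* q) { return !q->empty(); }
//
//   if (mu.LockWhenWithDeadline(y_absl::Condition(&HasWork, &queue),
//                               y_absl::Now() + y_absl::Seconds(1))) {
//     // Condition true: mu is held and queue is non-empty.
//   } else {
//     // Deadline passed: mu is held anyway, but queue may still be empty.
//   }
//   mu.Unlock();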
@@ -407,7 +424,7 @@ class Y_ABSL_LOCKABLE Mutex {
// substantially reduce `Mutex` performance; it should be set only for
// non-production runs. Optimization options may also disable invariant
// checks.
- void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+ void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
// Mutex::EnableDebugLog()
//
@@ -416,7 +433,7 @@ class Y_ABSL_LOCKABLE Mutex {
// call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
//
// Note: This method substantially reduces `Mutex` performance.
- void EnableDebugLog(const char *name);
+ void EnableDebugLog(const char* name);
// Deadlock detection
@@ -444,7 +461,7 @@ class Y_ABSL_LOCKABLE Mutex {
// A `MuHow` is a constant that indicates how a lock should be acquired.
// Internal implementation detail. Clients should ignore.
- typedef const struct MuHowS *MuHow;
+ typedef const struct MuHowS* MuHow;
// Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
//
@@ -466,37 +483,37 @@ class Y_ABSL_LOCKABLE Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
- static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
+ static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
synchronization_internal::KernelTimeout t);
// slow path acquire
- void LockSlowLoop(SynchWaitParams *waitp, int flags);
+ void LockSlowLoop(SynchWaitParams* waitp, int flags);
// wrappers around LockSlowLoop()
- bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+ bool LockSlowWithDeadline(MuHow how, const Condition* cond,
synchronization_internal::KernelTimeout t,
int flags);
- void LockSlow(MuHow how, const Condition *cond,
+ void LockSlow(MuHow how, const Condition* cond,
int flags) Y_ABSL_ATTRIBUTE_COLD;
// slow path release
- void UnlockSlow(SynchWaitParams *waitp) Y_ABSL_ATTRIBUTE_COLD;
+ void UnlockSlow(SynchWaitParams* waitp) Y_ABSL_ATTRIBUTE_COLD;
// Common code between Await() and AwaitWithTimeout/Deadline()
- bool AwaitCommon(const Condition &cond,
+ bool AwaitCommon(const Condition& cond,
synchronization_internal::KernelTimeout t);
// Attempt to remove thread s from queue.
- void TryRemove(base_internal::PerThreadSynch *s);
+ void TryRemove(base_internal::PerThreadSynch* s);
// Block a thread on mutex.
- void Block(base_internal::PerThreadSynch *s);
+ void Block(base_internal::PerThreadSynch* s);
// Wake a thread; return successor.
- base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+ base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
friend class CondVar; // for access to Trans()/Fer().
void Trans(MuHow how); // used for CondVar->Mutex transfer
void Fer(
- base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
+ base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
// Catch the error of writing Mutex when intending MutexLock.
- Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
+ explicit Mutex(const volatile Mutex* /*ignored*/) {}
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
@@ -531,28 +548,28 @@ class Y_ABSL_SCOPED_LOCKABLE MutexLock {
// Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable.
- explicit MutexLock(Mutex *mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit MutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
// Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
// the above, the condition given by `cond` is also guaranteed to hold when
// this object is constructed.
- explicit MutexLock(Mutex *mu, const Condition &cond)
+ explicit MutexLock(Mutex* mu, const Condition& cond)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
}
- MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
- MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
+ MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex)
+ MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete;
MutexLock& operator=(MutexLock&&) = delete;
~MutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
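//
// A minimal RAII sketch, assuming a hypothetical `counter` guarded by `mu`:
//
//   void Increment() {
//     y_absl::MutexLock lock(&mu);  // mu.Lock() runs in the constructor
//     ++counter;
//   }                               // mu.Unlock() runs in the destructor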
// ReaderMutexLock
@@ -561,11 +578,11 @@ class Y_ABSL_SCOPED_LOCKABLE MutexLock {
// releases a shared lock on a `Mutex` via RAII.
class Y_ABSL_SCOPED_LOCKABLE ReaderMutexLock {
public:
- explicit ReaderMutexLock(Mutex *mu) Y_ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit ReaderMutexLock(Mutex* mu) Y_ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
mu->ReaderLock();
}
- explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
Y_ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->ReaderLockWhen(cond);
@@ -579,7 +596,7 @@ class Y_ABSL_SCOPED_LOCKABLE ReaderMutexLock {
~ReaderMutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
// WriterMutexLock
@@ -588,12 +605,12 @@ class Y_ABSL_SCOPED_LOCKABLE ReaderMutexLock {
// releases a write (exclusive) lock on a `Mutex` via RAII.
class Y_ABSL_SCOPED_LOCKABLE WriterMutexLock {
public:
- explicit WriterMutexLock(Mutex *mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit WriterMutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLock();
}
- explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ explicit WriterMutexLock(Mutex* mu, const Condition& cond)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLockWhen(cond);
@@ -607,7 +624,7 @@ class Y_ABSL_SCOPED_LOCKABLE WriterMutexLock {
~WriterMutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
// -----------------------------------------------------------------------------
@@ -665,7 +682,7 @@ class Y_ABSL_SCOPED_LOCKABLE WriterMutexLock {
class Condition {
public:
// A Condition that returns the result of "(*func)(arg)"
- Condition(bool (*func)(void *), void *arg);
+ Condition(bool (*func)(void*), void* arg);
// Templated version for people who are averse to casts.
//
@@ -676,8 +693,22 @@ class Condition {
// Note: lambdas in this case must contain no bound variables.
//
// See class comment for performance advice.
- template<typename T>
- Condition(bool (*func)(T *), T *arg);
+ template <typename T>
+ Condition(bool (*func)(T*), T* arg);
+
+ // Same as above, but allows for cases where `arg` comes from a pointer that
+ // is convertible to the function parameter type `T*` but not an exact match.
+ //
+ // For example, the argument might be `X*` but the function takes `const X*`,
+ // or the argument might be `Derived*` while the function takes `Base*`, and
+ // so on for cases where the argument pointer can be implicitly converted.
+ //
+ // Implementation notes: This constructor overload is required in addition to
+// the one above to allow deduction of `T` from `arg` in cases such as when
+// a function template is passed as `func`. Also, the dummy `typename = void`
+// template parameter exists just to work around an MSVC mangling bug.
+ template <typename T, typename = void>
+ Condition(bool (*func)(T*), typename y_absl::internal::identity<T>::type* arg);
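//
// A short sketch of the conversion case this overload enables (`Data` and
// `IsReady` are hypothetical names). Here `T` is deduced from `func` alone,
// so the `Data*` argument is free to convert to `const Data*`:
//
//   struct Data { bool ready; };
//   static bool IsReady(const Data* d) { return d->ready; }
//
//   Data data = {false};
//   mu.Await(y_absl::Condition(&IsReady, &data));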
// Templated version for invoking a method that returns a `bool`.
//
@@ -687,16 +718,16 @@ class Condition {
// Implementation Note: `y_absl::internal::identity` is used to allow methods to
// come from base classes. A simpler signature like
// `Condition(T*, bool (T::*)())` does not suffice.
- template<typename T>
- Condition(T *object, bool (y_absl::internal::identity<T>::type::* method)());
+ template <typename T>
+ Condition(T* object, bool (y_absl::internal::identity<T>::type::*method)());
// Same as above, for const members
- template<typename T>
- Condition(const T *object,
- bool (y_absl::internal::identity<T>::type::* method)() const);
+ template <typename T>
+ Condition(const T* object,
+ bool (y_absl::internal::identity<T>::type::*method)() const);
// A Condition that returns the value of `*cond`
- explicit Condition(const bool *cond);
+ explicit Condition(const bool* cond);
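//
// A one-line sketch of this bool-pointer form, assuming a hypothetical flag
// `done` guarded by `mu`:
//
//   bool done = false;  // guarded by mu
//   mu.Await(y_absl::Condition(&done));  // returns once done == true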
// Templated version for invoking a functor that returns a `bool`.
// This approach accepts pointers to non-mutable lambdas, `std::function`,
@@ -723,12 +754,22 @@ class Condition {
// Implementation note: The second template parameter ensures that this
// constructor doesn't participate in overload resolution if T doesn't have
// `bool operator() const`.
- template <typename T, typename E = decltype(
- static_cast<bool (T::*)() const>(&T::operator()))>
- explicit Condition(const T *obj)
+ template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
+ &T::operator()))>
+ explicit Condition(const T* obj)
: Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
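//
// A short sketch of the functor form; unlike the function-pointer
// constructors above, captures are allowed here (`queue` is hypothetical):
//
//   auto ready = [&queue]() { return !queue.empty(); };
//   mu.Await(y_absl::Condition(&ready));  // pointer to a non-mutable lambda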
// A Condition that always returns `true`.
+ // kTrue is only useful in a narrow set of circumstances, mostly when
+ // it's passed conditionally. For example:
+ //
+ // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
+ //
+// Note: {LockWhen,Await}With{Deadline,Timeout} methods with a kTrue condition
+// don't return immediately when the timeout happens; they still block until
+ // the Mutex becomes available. The return value of these methods does
+ // not indicate if the timeout was reached; rather it indicates whether or
+ // not the condition is true.
Y_ABSL_CONST_INIT static const Condition kTrue;
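//
// A one-line sketch of the timed-wait caveat above: with kTrue the call
// still blocks until the Mutex is free, and it returns true (the condition
// holds) even if the timeout elapsed while blocked:
//
//   mu.LockWhenWithTimeout(y_absl::Condition::kTrue, y_absl::Seconds(1));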
// Evaluates the condition.
@@ -741,7 +782,7 @@ class Condition {
// Two `Condition` values are guaranteed equal if both their `func` and `arg`
// components are the same. A null pointer is equivalent to a `true`
// condition.
- static bool GuaranteedEqual(const Condition *a, const Condition *b);
+ static bool GuaranteedEqual(const Condition* a, const Condition* b);
private:
// Sizing an allocation for a method pointer can be subtle. In the Itanium
@@ -769,12 +810,14 @@ class Condition {
bool (*eval_)(const Condition*) = nullptr;
// Either an argument for a function call or an object for a method call.
- void *arg_ = nullptr;
+ void* arg_ = nullptr;
// Various functions eval_ can point to:
static bool CallVoidPtrFunction(const Condition*);
- template <typename T> static bool CastAndCallFunction(const Condition* c);
- template <typename T> static bool CastAndCallMethod(const Condition* c);
+ template <typename T>
+ static bool CastAndCallFunction(const Condition* c);
+ template <typename T>
+ static bool CastAndCallMethod(const Condition* c);
// Helper methods for storing, validating, and reading callback arguments.
template <typename T>
@@ -786,7 +829,7 @@ class Condition {
}
template <typename T>
- inline void ReadCallback(T *callback) const {
+ inline void ReadCallback(T* callback) const {
std::memcpy(callback, callback_, sizeof(*callback));
}
@@ -843,7 +886,7 @@ class CondVar {
// spurious wakeup), then reacquires the `Mutex` and returns.
//
// Requires and ensures that the current thread holds the `Mutex`.
- void Wait(Mutex *mu);
+ void Wait(Mutex* mu);
// CondVar::WaitWithTimeout()
//
@@ -858,7 +901,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithTimeout(Mutex *mu, y_absl::Duration timeout);
+ bool WaitWithTimeout(Mutex* mu, y_absl::Duration timeout);
// CondVar::WaitWithDeadline()
//
@@ -875,7 +918,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithDeadline(Mutex *mu, y_absl::Time deadline);
+ bool WaitWithDeadline(Mutex* mu, y_absl::Time deadline);
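//
// A minimal wait-loop sketch, assuming hypothetical `mu`, `cv`, and a flag
// `ready` guarded by `mu`; wakeups may be spurious, so re-check the
// predicate:
//
//   mu.Lock();
//   while (!ready) {
//     cv.Wait(&mu);  // atomically releases mu, blocks, then reacquires mu
//   }
//   // ... use the state guarded by mu ...
//   mu.Unlock();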
// CondVar::Signal()
//
@@ -892,18 +935,17 @@ class CondVar {
// Causes all subsequent uses of this `CondVar` to be logged via
// `Y_ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
// Note: this method substantially reduces `CondVar` performance.
- void EnableDebugLog(const char *name);
+ void EnableDebugLog(const char* name);
private:
- bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
- void Remove(base_internal::PerThreadSynch *s);
- void Wakeup(base_internal::PerThreadSynch *w);
+ bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch* s);
+ void Wakeup(base_internal::PerThreadSynch* w);
std::atomic<intptr_t> cv_; // Condition variable state.
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
};
-
// Variants of MutexLock.
//
// If you find yourself using one of these, consider instead using
@@ -914,14 +956,14 @@ class CondVar {
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class Y_ABSL_SCOPED_LOCKABLE MutexLockMaybe {
public:
- explicit MutexLockMaybe(Mutex *mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit MutexLockMaybe(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
this->mu_->Lock();
}
}
- explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
@@ -930,11 +972,13 @@ class Y_ABSL_SCOPED_LOCKABLE MutexLockMaybe {
}
~MutexLockMaybe() Y_ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ if (this->mu_ != nullptr) {
+ this->mu_->Unlock();
+ }
}
private:
- Mutex *const mu_;
+ Mutex* const mu_;
MutexLockMaybe(const MutexLockMaybe&) = delete;
MutexLockMaybe(MutexLockMaybe&&) = delete;
MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
@@ -947,25 +991,27 @@ class Y_ABSL_SCOPED_LOCKABLE MutexLockMaybe {
// mutex before destruction. `Release()` may be called at most once.
class Y_ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
public:
- explicit ReleasableMutexLock(Mutex *mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit ReleasableMutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->Lock();
}
- explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
}
~ReleasableMutexLock() Y_ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ if (this->mu_ != nullptr) {
+ this->mu_->Unlock();
+ }
}
void Release() Y_ABSL_UNLOCK_FUNCTION();
private:
- Mutex *mu_;
+ Mutex* mu_;
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock(ReleasableMutexLock&&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
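//
// A minimal early-release sketch (`DoLockedWork()` and `DoUnlockedWork()`
// are hypothetical):
//
//   y_absl::ReleasableMutexLock lock(&mu);
//   DoLockedWork();
//   lock.Release();    // at most once; the destructor then does nothing
//   DoUnlockedWork();  // runs without holding mu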
@@ -982,8 +1028,8 @@ inline CondVar::CondVar() : cv_(0) {}
// static
template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
- T *object = static_cast<T *>(c->arg_);
+bool Condition::CastAndCallMethod(const Condition* c) {
+ T* object = static_cast<T*>(c->arg_);
bool (T::*method_pointer)();
c->ReadCallback(&method_pointer);
return (object->*method_pointer)();
@@ -991,38 +1037,43 @@ bool Condition::CastAndCallMethod(const Condition *c) {
// static
template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
- bool (*function)(T *);
+bool Condition::CastAndCallFunction(const Condition* c) {
+ bool (*function)(T*);
c->ReadCallback(&function);
- T *argument = static_cast<T *>(c->arg_);
+ T* argument = static_cast<T*>(c->arg_);
return (*function)(argument);
}
template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
+inline Condition::Condition(bool (*func)(T*), T* arg)
: eval_(&CastAndCallFunction<T>),
- arg_(const_cast<void *>(static_cast<const void *>(arg))) {
+ arg_(const_cast<void*>(static_cast<const void*>(arg))) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer was passed to Condition.");
StoreCallback(func);
}
+template <typename T, typename>
+inline Condition::Condition(bool (*func)(T*),
+ typename y_absl::internal::identity<T>::type* arg)
+ // Just delegate to the overload above.
+ : Condition(func, arg) {}
+
template <typename T>
-inline Condition::Condition(T *object,
+inline Condition::Condition(T* object,
bool (y_absl::internal::identity<T>::type::*method)())
- : eval_(&CastAndCallMethod<T>),
- arg_(object) {
+ : eval_(&CastAndCallMethod<T>), arg_(object) {
static_assert(sizeof(&method) <= sizeof(callback_),
"An overlarge method pointer was passed to Condition.");
StoreCallback(method);
}
template <typename T>
-inline Condition::Condition(const T *object,
+inline Condition::Condition(const T* object,
bool (y_absl::internal::identity<T>::type::*method)()
const)
: eval_(&CastAndCallMethod<T>),
- arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {
+ arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
StoreCallback(method);
}
@@ -1052,7 +1103,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles));
// Register a hook for CondVar tracing.
@@ -1067,24 +1118,7 @@ void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
-
-// Register a hook for symbolizing stack traces in deadlock detector reports.
-//
-// 'pc' is the program counter being symbolized, 'out' is the buffer to write
-// into, and 'out_size' is the size of the buffer. This function can return
-// false if symbolizing failed, or true if a NUL-terminated symbol was written
-// to 'out.'
-//
-// This has the same ordering and single-use limitations as
-// RegisterMutexProfiler() above.
-//
-// DEPRECATED: The default symbolizer function is y_absl::Symbolize() and the
-// ability to register a different hook for symbolizing stack traces will be
-// removed on or after 2023-05-01.
-Y_ABSL_DEPRECATED("y_absl::RegisterSymbolizer() is deprecated and will be removed "
- "on or after 2023-05-01")
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
void ResetDeadlockGraphMu();
@@ -1103,7 +1137,7 @@ void EnableMutexInvariantDebugging(bool enabled);
enum class OnDeadlockCycle {
kIgnore, // Neither report on nor attempt to track cycles in lock ordering
kReport, // Report lock cycles to stderr when detected
- kAbort, // Report lock cycles to stderr when detected, then abort
+ kAbort, // Report lock cycles to stderr when detected, then abort
};
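//
// A one-line usage sketch of these values with the setter documented below:
//
//   y_absl::SetMutexDeadlockDetectionMode(y_absl::OnDeadlockCycle::kAbort);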
// SetMutexDeadlockDetectionMode()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
index def50a2091..692c6d2136 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
@@ -24,9 +24,15 @@ SRCS(
barrier.cc
blocking_counter.cc
internal/create_thread_identity.cc
+ internal/futex_waiter.cc
internal/graphcycles.cc
+ internal/kernel_timeout.cc
internal/per_thread_sem.cc
- internal/waiter.cc
+ internal/pthread_waiter.cc
+ internal/sem_waiter.cc
+ internal/stdcpp_waiter.cc
+ internal/waiter_base.cc
+ internal/win32_waiter.cc
mutex.cc
notification.cc
)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc
index 7503e97a68..332b9978f9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/clock.cc
@@ -48,17 +48,16 @@ Time Now() {
Y_ABSL_NAMESPACE_END
} // namespace y_absl
-// Decide if we should use the fast GetCurrentTimeNanos() algorithm
-// based on the cyclecounter, otherwise just get the time directly
-// from the OS on every call. This can be chosen at compile-time via
+// Decide if we should use the fast GetCurrentTimeNanos() algorithm based on the
+// cyclecounter; otherwise just get the time directly from the OS on every call.
+// By default, the fast algorithm based on the cyclecounter is disabled because
+// in certain situations, for example, if the OS enters a "sleep" mode, it may
+// produce incorrect values immediately upon waking.
+// This can be chosen at compile-time via
// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
#ifndef Y_ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
-#if Y_ABSL_USE_UNSCALED_CYCLECLOCK
-#define Y_ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
-#else
#define Y_ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
-#endif
#if defined(__APPLE__) || defined(_WIN32)
#include "y_absl/time/internal/get_current_time_chrono.inc"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
index 55b8a04117..f6b2f464dc 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/duration.cc
@@ -96,13 +96,6 @@ inline bool IsValidDivisor(double d) {
return d != 0.0;
}
-// Can't use std::round() because it is only available in C++11.
-// Note that we ignore the possibility of floating-point over/underflow.
-template <typename Double>
-inline double Round(Double d) {
- return d < 0 ? std::ceil(d - 0.5) : std::floor(d + 0.5);
-}
-
// *sec may be positive or negative. *ticks must be in the range
// -kTicksPerSecond < *ticks < kTicksPerSecond. If *ticks is negative it
// will be normalized to a positive value by adjusting *sec accordingly.
@@ -260,7 +253,7 @@ inline Duration ScaleDouble(Duration d, double r) {
double lo_frac = std::modf(lo_doub, &lo_int);
// Rolls lo into hi if necessary.
- int64_t lo64 = Round(lo_frac * kTicksPerSecond);
+ int64_t lo64 = std::round(lo_frac * kTicksPerSecond);
Duration ans;
if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
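
The replacement is behavior-preserving for halfway cases: like the removed
helper, std::round() rounds them away from zero. A standalone check of that
assumption:

    #include <cassert>
    #include <cmath>

    int main() {
      // std::round rounds halfway cases away from zero, matching the removed
      // ceil(d - 0.5) / floor(d + 0.5) helper on these inputs.
      assert(std::round(0.5) == 1.0);
      assert(std::round(-0.5) == -1.0);
      assert(std::round(2.5) == 3.0);
      return 0;
    }
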
@@ -407,16 +400,18 @@ int64_t IDivDuration(bool satq, const Duration num, const Duration den,
Duration& Duration::operator+=(Duration rhs) {
if (time_internal::IsInfiniteDuration(*this)) return *this;
if (time_internal::IsInfiniteDuration(rhs)) return *this = rhs;
- const int64_t orig_rep_hi = rep_hi_;
- rep_hi_ =
- DecodeTwosComp(EncodeTwosComp(rep_hi_) + EncodeTwosComp(rhs.rep_hi_));
+ const int64_t orig_rep_hi = rep_hi_.Get();
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) +
+ EncodeTwosComp(rhs.rep_hi_.Get()));
if (rep_lo_ >= kTicksPerSecond - rhs.rep_lo_) {
- rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) + 1);
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) + 1);
rep_lo_ -= kTicksPerSecond;
}
rep_lo_ += rhs.rep_lo_;
- if (rhs.rep_hi_ < 0 ? rep_hi_ > orig_rep_hi : rep_hi_ < orig_rep_hi) {
- return *this = rhs.rep_hi_ < 0 ? -InfiniteDuration() : InfiniteDuration();
+ if (rhs.rep_hi_.Get() < 0 ? rep_hi_.Get() > orig_rep_hi
+ : rep_hi_.Get() < orig_rep_hi) {
+ return *this =
+ rhs.rep_hi_.Get() < 0 ? -InfiniteDuration() : InfiniteDuration();
}
return *this;
}
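
The saturation logic above detects signed overflow by doing the arithmetic on
unsigned values, where wraparound is well defined, and then checking that the
result moved in the direction of the addend. A self-contained restatement with
hypothetical names (the library routes the casts through EncodeTwosComp /
DecodeTwosComp to stay portable to pre-C++20 compilers):

    #include <cstdint>

    // Returns false when a + b overflowed; *out holds the wrapped sum.
    bool AddWithoutOverflow(int64_t a, int64_t b, int64_t* out) {
      *out = static_cast<int64_t>(static_cast<uint64_t>(a) +
                                  static_cast<uint64_t>(b));
      // Adding b < 0 must not increase the result; b >= 0 must not decrease it.
      return b < 0 ? *out <= a : *out >= a;
    }

A caller, like operator+= above, saturates to +/-InfiniteDuration() when the
check fails.
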
@@ -424,18 +419,21 @@ Duration& Duration::operator+=(Duration rhs) {
Duration& Duration::operator-=(Duration rhs) {
if (time_internal::IsInfiniteDuration(*this)) return *this;
if (time_internal::IsInfiniteDuration(rhs)) {
- return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+ return *this = rhs.rep_hi_.Get() >= 0 ? -InfiniteDuration()
+ : InfiniteDuration();
}
- const int64_t orig_rep_hi = rep_hi_;
- rep_hi_ =
- DecodeTwosComp(EncodeTwosComp(rep_hi_) - EncodeTwosComp(rhs.rep_hi_));
+ const int64_t orig_rep_hi = rep_hi_.Get();
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) -
+ EncodeTwosComp(rhs.rep_hi_.Get()));
if (rep_lo_ < rhs.rep_lo_) {
- rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) - 1);
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) - 1);
rep_lo_ += kTicksPerSecond;
}
rep_lo_ -= rhs.rep_lo_;
- if (rhs.rep_hi_ < 0 ? rep_hi_ < orig_rep_hi : rep_hi_ > orig_rep_hi) {
- return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+ if (rhs.rep_hi_.Get() < 0 ? rep_hi_.Get() < orig_rep_hi
+ : rep_hi_.Get() > orig_rep_hi) {
+ return *this = rhs.rep_hi_.Get() >= 0 ? -InfiniteDuration()
+ : InfiniteDuration();
}
return *this;
}
@@ -446,7 +444,7 @@ Duration& Duration::operator-=(Duration rhs) {
Duration& Duration::operator*=(int64_t r) {
if (time_internal::IsInfiniteDuration(*this)) {
- const bool is_neg = (r < 0) != (rep_hi_ < 0);
+ const bool is_neg = (r < 0) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleFixed<SafeMultiply>(*this, r);
@@ -454,7 +452,7 @@ Duration& Duration::operator*=(int64_t r) {
Duration& Duration::operator*=(double r) {
if (time_internal::IsInfiniteDuration(*this) || !IsFinite(r)) {
- const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+ const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleDouble<std::multiplies>(*this, r);
@@ -462,7 +460,7 @@ Duration& Duration::operator*=(double r) {
Duration& Duration::operator/=(int64_t r) {
if (time_internal::IsInfiniteDuration(*this) || r == 0) {
- const bool is_neg = (r < 0) != (rep_hi_ < 0);
+ const bool is_neg = (r < 0) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleFixed<std::divides>(*this, r);
@@ -470,7 +468,7 @@ Duration& Duration::operator/=(int64_t r) {
Duration& Duration::operator/=(double r) {
if (time_internal::IsInfiniteDuration(*this) || !IsValidDivisor(r)) {
- const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+ const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleDouble<std::divides>(*this, r);
@@ -741,7 +739,7 @@ void AppendNumberUnit(TString* out, double n, DisplayUnit unit) {
char buf[kBufferSize]; // also large enough to hold integer part
char* ep = buf + sizeof(buf);
double d = 0;
- int64_t frac_part = Round(std::modf(n, &d) * unit.pow10);
+ int64_t frac_part = std::round(std::modf(n, &d) * unit.pow10);
int64_t int_part = d;
if (int_part != 0 || frac_part != 0) {
char* bp = Format64(ep, 0, int_part); // always < 1000
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h
index 3c213c1da4..ffc48a176e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/include/cctz/time_zone.h
@@ -23,6 +23,7 @@
#include <chrono>
#include <cstdint>
#include <limits>
+#include <ratio> // NOLINT: We use std::ratio in this header
#include <util/generic/string.h>
#include <utility>
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc
index a204acf61d..3edeffc34b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_fixed.cc
@@ -105,7 +105,7 @@ TString FixedOffsetToName(const seconds& offset) {
offset_minutes %= 60;
const std::size_t prefix_len = sizeof(kFixedZonePrefix) - 1;
char buf[prefix_len + sizeof("-24:00:00")];
- char* ep = std::copy(kFixedZonePrefix, kFixedZonePrefix + prefix_len, buf);
+ char* ep = std::copy_n(kFixedZonePrefix, prefix_len, buf);
*ep++ = sign;
ep = Format02d(ep, offset_hours);
*ep++ = ':';
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc
index 5e8f790ec0..0d15a14290 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_format.cc
@@ -13,14 +13,8 @@
// limitations under the License.
#if !defined(HAS_STRPTIME)
-#if !defined(_MSC_VER) && !defined(__MINGW32__)
-#define HAS_STRPTIME 1 // assume everyone has strptime() except windows
-#endif
-#endif
-
-#if defined(HAS_STRPTIME) && HAS_STRPTIME
-#if !defined(_XOPEN_SOURCE) && !defined(__OpenBSD__)
-#define _XOPEN_SOURCE // Definedness suffices for strptime.
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__VXWORKS__)
+#define HAS_STRPTIME 1 // Assume everyone else has strptime().
#endif
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc
index 26074ebe75..7daac45389 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.cc
@@ -23,17 +23,19 @@ Y_ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
-std::unique_ptr<TimeZoneIf> TimeZoneIf::Load(const TString& name) {
+std::unique_ptr<TimeZoneIf> TimeZoneIf::UTC() { return TimeZoneInfo::UTC(); }
+
+std::unique_ptr<TimeZoneIf> TimeZoneIf::Make(const TString& name) {
// Support "libc:localtime" and "libc:*" to access the legacy
// localtime and UTC support respectively from the C library.
+ // NOTE: The "libc:*" zones are internal, test-only interfaces, and
+ // are subject to change/removal without notice. Do not use them.
if (name.compare(0, 5, "libc:") == 0) {
- return std::unique_ptr<TimeZoneIf>(new TimeZoneLibC(name.substr(5)));
+ return TimeZoneLibC::Make(name.substr(5));
}
- // Otherwise use the "zoneinfo" implementation by default.
- std::unique_ptr<TimeZoneInfo> tz(new TimeZoneInfo);
- if (!tz->Load(name)) tz.reset();
- return std::unique_ptr<TimeZoneIf>(tz.release());
+ // Otherwise use the "zoneinfo" implementation.
+ return TimeZoneInfo::Make(name);
}
// Defined out-of-line to avoid emitting a weak vtable in all TUs.
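
A minimal, self-contained sketch of the dispatch shape above (every type here
is a hypothetical stand-in): "libc:*" names route to the C-library zone, and
everything else goes to the zoneinfo implementation.

    #include <memory>
    #include <string>

    struct Zone { virtual ~Zone() = default; };
    struct LibcZone : Zone { explicit LibcZone(std::string) {} };
    struct ZoneinfoZone : Zone { explicit ZoneinfoZone(std::string) {} };

    std::unique_ptr<Zone> MakeZone(const std::string& name) {
      if (name.compare(0, 5, "libc:") == 0)  // e.g. "libc:localtime"
        return std::make_unique<LibcZone>(name.substr(5));
      // The real zoneinfo factory may return nullptr on a failed lookup.
      return std::make_unique<ZoneinfoZone>(name);
    }
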
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h
index 949bfff7ae..a8e31ecb2f 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_if.h
@@ -33,8 +33,9 @@ namespace cctz {
// Subclasses implement the functions for civil-time conversions in the zone.
class TimeZoneIf {
public:
- // A factory function for TimeZoneIf implementations.
- static std::unique_ptr<TimeZoneIf> Load(const TString& name);
+ // Factory functions for TimeZoneIf implementations.
+ static std::unique_ptr<TimeZoneIf> UTC(); // never fails
+ static std::unique_ptr<TimeZoneIf> Make(const TString& name);
virtual ~TimeZoneIf();
@@ -51,7 +52,9 @@ class TimeZoneIf {
virtual TString Description() const = 0;
protected:
- TimeZoneIf() {}
+ TimeZoneIf() = default;
+ TimeZoneIf(const TimeZoneIf&) = delete;
+ TimeZoneIf& operator=(const TimeZoneIf&) = delete;
};
// Convert between time_point<seconds> and a count of seconds since the
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc
index 09cab23ef4..347abf64d0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.cc
@@ -99,11 +99,13 @@ void time_zone::Impl::ClearTimeZoneMapTestOnly() {
}
}
+time_zone::Impl::Impl() : name_("UTC"), zone_(TimeZoneIf::UTC()) {}
+
time_zone::Impl::Impl(const TString& name)
- : name_(name), zone_(TimeZoneIf::Load(name_)) {}
+ : name_(name), zone_(TimeZoneIf::Make(name_)) {}
const time_zone::Impl* time_zone::Impl::UTCImpl() {
- static const Impl* utc_impl = new Impl("UTC"); // never fails
+ static const Impl* utc_impl = new Impl;
return utc_impl;
}
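
The UTCImpl() change keeps the long-standing construct-on-first-use singleton:
the object is allocated once and deliberately never destroyed, so it stays
usable during shutdown with no static-destruction-order hazards. A generic
sketch (the type is hypothetical):

    struct Registry { int entries = 0; };

    const Registry* SharedRegistry() {
      // Leaked by design: no destructor ever runs, so callers running inside
      // static destructors of other translation units remain safe.
      static const Registry* registry = new Registry;
      return registry;
    }
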
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h
index 76dc84496a..52941a0fe6 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_impl.h
@@ -78,7 +78,11 @@ class time_zone::Impl {
TString Description() const { return zone_->Description(); }
private:
+ Impl();
explicit Impl(const TString& name);
+ Impl(const Impl&) = delete;
+ Impl& operator=(const Impl&) = delete;
+
static const Impl* UTCImpl();
const TString name_;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc
index 3444f6ced9..eced23a362 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.cc
@@ -45,6 +45,7 @@
#include <sstream>
#include <util/generic/string.h>
#include <utility>
+#include <vector>
#include "y_absl/base/config.h"
#include "y_absl/time/internal/cctz/include/cctz/civil_time.h"
@@ -134,6 +135,49 @@ std::int_fast64_t Decode64(const char* cp) {
return static_cast<std::int_fast64_t>(v - s64maxU - 1) - s64max - 1;
}
+struct Header { // counts of:
+ std::size_t timecnt; // transition times
+ std::size_t typecnt; // transition types
+ std::size_t charcnt; // zone abbreviation characters
+ std::size_t leapcnt; // leap seconds (we expect none)
+ std::size_t ttisstdcnt; // standard/wall indicators (unused)
+ std::size_t ttisutcnt; // UTC/local indicators (unused)
+
+ bool Build(const tzhead& tzh);
+ std::size_t DataLength(std::size_t time_len) const;
+};
+
+// Builds the in-memory header using the raw bytes from the file.
+bool Header::Build(const tzhead& tzh) {
+ std::int_fast32_t v;
+ if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
+ timecnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
+ typecnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
+ charcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
+ leapcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
+ ttisstdcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
+ ttisutcnt = static_cast<std::size_t>(v);
+ return true;
+}
+
+// How many bytes of data are associated with this header. The result
+// depends upon whether this is a section with 4-byte or 8-byte times.
+std::size_t Header::DataLength(std::size_t time_len) const {
+ std::size_t len = 0;
+ len += (time_len + 1) * timecnt; // unix_time + type_index
+ len += (4 + 1 + 1) * typecnt; // utc_offset + is_dst + abbr_index
+ len += 1 * charcnt; // abbreviations
+ len += (time_len + 4) * leapcnt; // leap-time + TAI-UTC
+ len += 1 * ttisstdcnt; // standard/wall indicators
+ len += 1 * ttisutcnt; // UTC/local indicators
+ return len;
+}
+
// Does the rule for future transitions call for year-round daylight time?
// See tz/zic.c:stringzone() for the details on how such rules are encoded.
bool AllYearDST(const PosixTimeZone& posix) {
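
As a worked example of DataLength(): for a version-2+ section with 8-byte times
and timecnt=3, typecnt=2, charcnt=10, with the remaining counts zero, the
payload is (8+1)*3 + (4+1+1)*2 + 10 = 49 bytes.
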
@@ -217,98 +261,6 @@ inline civil_second YearShift(const civil_second& cs, year_t shift) {
} // namespace
-// What (no leap-seconds) UTC+seconds zoneinfo would look like.
-bool TimeZoneInfo::ResetToBuiltinUTC(const seconds& offset) {
- transition_types_.resize(1);
- TransitionType& tt(transition_types_.back());
- tt.utc_offset = static_cast<std::int_least32_t>(offset.count());
- tt.is_dst = false;
- tt.abbr_index = 0;
-
- // We temporarily add some redundant, contemporary (2015 through 2025)
- // transitions for performance reasons. See TimeZoneInfo::LocalTime().
- // TODO: Fix the performance issue and remove the extra transitions.
- transitions_.clear();
- transitions_.reserve(12);
- for (const std::int_fast64_t unix_time : {
- -(1LL << 59), // a "first half" transition
- 1420070400LL, // 2015-01-01T00:00:00+00:00
- 1451606400LL, // 2016-01-01T00:00:00+00:00
- 1483228800LL, // 2017-01-01T00:00:00+00:00
- 1514764800LL, // 2018-01-01T00:00:00+00:00
- 1546300800LL, // 2019-01-01T00:00:00+00:00
- 1577836800LL, // 2020-01-01T00:00:00+00:00
- 1609459200LL, // 2021-01-01T00:00:00+00:00
- 1640995200LL, // 2022-01-01T00:00:00+00:00
- 1672531200LL, // 2023-01-01T00:00:00+00:00
- 1704067200LL, // 2024-01-01T00:00:00+00:00
- 1735689600LL, // 2025-01-01T00:00:00+00:00
- }) {
- Transition& tr(*transitions_.emplace(transitions_.end()));
- tr.unix_time = unix_time;
- tr.type_index = 0;
- tr.civil_sec = LocalTime(tr.unix_time, tt).cs;
- tr.prev_civil_sec = tr.civil_sec - 1;
- }
-
- default_transition_type_ = 0;
- abbreviations_ = FixedOffsetToAbbr(offset);
- abbreviations_.append(1, '\0');
- future_spec_.clear(); // never needed for a fixed-offset zone
- extended_ = false;
-
- tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
- tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
-
- transitions_.shrink_to_fit();
- return true;
-}
-
-// Builds the in-memory header using the raw bytes from the file.
-bool TimeZoneInfo::Header::Build(const tzhead& tzh) {
- std::int_fast32_t v;
- if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
- timecnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
- typecnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
- charcnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
- leapcnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
- ttisstdcnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
- ttisutcnt = static_cast<std::size_t>(v);
- return true;
-}
-
-// How many bytes of data are associated with this header. The result
-// depends upon whether this is a section with 4-byte or 8-byte times.
-std::size_t TimeZoneInfo::Header::DataLength(std::size_t time_len) const {
- std::size_t len = 0;
- len += (time_len + 1) * timecnt; // unix_time + type_index
- len += (4 + 1 + 1) * typecnt; // utc_offset + is_dst + abbr_index
- len += 1 * charcnt; // abbreviations
- len += (time_len + 4) * leapcnt; // leap-time + TAI-UTC
- len += 1 * ttisstdcnt; // UTC/local indicators
- len += 1 * ttisutcnt; // standard/wall indicators
- return len;
-}
-
-// zic(8) can generate no-op transitions when a zone changes rules at an
-// instant when there is actually no discontinuity. So we check whether
-// two transitions have equivalent types (same offset/is_dst/abbr).
-bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
- std::uint_fast8_t tt2_index) const {
- if (tt1_index == tt2_index) return true;
- const TransitionType& tt1(transition_types_[tt1_index]);
- const TransitionType& tt2(transition_types_[tt2_index]);
- if (tt1.utc_offset != tt2.utc_offset) return false;
- if (tt1.is_dst != tt2.is_dst) return false;
- if (tt1.abbr_index != tt2.abbr_index) return false;
- return true;
-}
-
// Find/make a transition type with these attributes.
bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
const TString& abbr,
@@ -341,6 +293,20 @@ bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
return true;
}
+// zic(8) can generate no-op transitions when a zone changes rules at an
+// instant when there is actually no discontinuity. So we check whether
+// two transitions have equivalent types (same offset/is_dst/abbr).
+bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
+ std::uint_fast8_t tt2_index) const {
+ if (tt1_index == tt2_index) return true;
+ const TransitionType& tt1(transition_types_[tt1_index]);
+ const TransitionType& tt2(transition_types_[tt2_index]);
+ if (tt1.utc_offset != tt2.utc_offset) return false;
+ if (tt1.is_dst != tt2.is_dst) return false;
+ if (tt1.abbr_index != tt2.abbr_index) return false;
+ return true;
+}
+
// Use the POSIX-TZ-environment-variable-style string to handle times
// in years after the last transition stored in the zoneinfo data.
bool TimeZoneInfo::ExtendTransitions() {
@@ -372,11 +338,13 @@ bool TimeZoneInfo::ExtendTransitions() {
return EquivTransitions(transitions_.back().type_index, dst_ti);
}
- // Extend the transitions for an additional 400 years using the
- // future specification. Years beyond those can be handled by
- // mapping back to a cycle-equivalent year within that range.
- // We may need two additional transitions for the current year.
- transitions_.reserve(transitions_.size() + 400 * 2 + 2);
+ // Extend the transitions for an additional 401 years using the future
+ // specification. Years beyond those can be handled by mapping back to
+ // a cycle-equivalent year within that range. Note that we need 401
+ // (well, at least the first transition in the 401st year) so that the
+ // end of the 400th year is mapped back to an extended year. And first
+ // we may also need two additional transitions for the current year.
+ transitions_.reserve(transitions_.size() + 2 + 401 * 2);
extended_ = true;
const Transition& last(transitions_.back());
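
(Concretely, that reserve makes room for up to 2 extra transitions in the
current year plus 2 per extended year: 2 + 401*2 = 804 additional entries.)
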
@@ -390,7 +358,7 @@ bool TimeZoneInfo::ExtendTransitions() {
Transition dst = {0, dst_ti, civil_second(), civil_second()};
Transition std = {0, std_ti, civil_second(), civil_second()};
- for (const year_t limit = last_year_ + 400;; ++last_year_) {
+ for (const year_t limit = last_year_ + 401;; ++last_year_) {
auto dst_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_start);
auto std_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_end);
dst.unix_time = jan1_time + dst_trans_off - posix.std_offset;
@@ -410,193 +378,6 @@ bool TimeZoneInfo::ExtendTransitions() {
return true;
}
-bool TimeZoneInfo::Load(ZoneInfoSource* zip) {
- // Read and validate the header.
- tzhead tzh;
- if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
- if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
- return false;
- Header hdr;
- if (!hdr.Build(tzh)) return false;
- std::size_t time_len = 4;
- if (tzh.tzh_version[0] != '\0') {
- // Skip the 4-byte data.
- if (zip->Skip(hdr.DataLength(time_len)) != 0) return false;
- // Read and validate the header for the 8-byte data.
- if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
- if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
- return false;
- if (tzh.tzh_version[0] == '\0') return false;
- if (!hdr.Build(tzh)) return false;
- time_len = 8;
- }
- if (hdr.typecnt == 0) return false;
- if (hdr.leapcnt != 0) {
- // This code assumes 60-second minutes so we do not want
- // the leap-second encoded zoneinfo. We could reverse the
- // compensation, but the "right" encoding is rarely used
- // so currently we simply reject such data.
- return false;
- }
- if (hdr.ttisstdcnt != 0 && hdr.ttisstdcnt != hdr.typecnt) return false;
- if (hdr.ttisutcnt != 0 && hdr.ttisutcnt != hdr.typecnt) return false;
-
- // Read the data into a local buffer.
- std::size_t len = hdr.DataLength(time_len);
- std::vector<char> tbuf(len);
- if (zip->Read(tbuf.data(), len) != len) return false;
- const char* bp = tbuf.data();
-
- // Decode and validate the transitions.
- transitions_.reserve(hdr.timecnt + 2);
- transitions_.resize(hdr.timecnt);
- for (std::size_t i = 0; i != hdr.timecnt; ++i) {
- transitions_[i].unix_time = (time_len == 4) ? Decode32(bp) : Decode64(bp);
- bp += time_len;
- if (i != 0) {
- // Check that the transitions are ordered by time (as zic guarantees).
- if (!Transition::ByUnixTime()(transitions_[i - 1], transitions_[i]))
- return false; // out of order
- }
- }
- bool seen_type_0 = false;
- for (std::size_t i = 0; i != hdr.timecnt; ++i) {
- transitions_[i].type_index = Decode8(bp++);
- if (transitions_[i].type_index >= hdr.typecnt) return false;
- if (transitions_[i].type_index == 0) seen_type_0 = true;
- }
-
- // Decode and validate the transition types.
- transition_types_.reserve(hdr.typecnt + 2);
- transition_types_.resize(hdr.typecnt);
- for (std::size_t i = 0; i != hdr.typecnt; ++i) {
- transition_types_[i].utc_offset =
- static_cast<std::int_least32_t>(Decode32(bp));
- if (transition_types_[i].utc_offset >= kSecsPerDay ||
- transition_types_[i].utc_offset <= -kSecsPerDay)
- return false;
- bp += 4;
- transition_types_[i].is_dst = (Decode8(bp++) != 0);
- transition_types_[i].abbr_index = Decode8(bp++);
- if (transition_types_[i].abbr_index >= hdr.charcnt) return false;
- }
-
- // Determine the before-first-transition type.
- default_transition_type_ = 0;
- if (seen_type_0 && hdr.timecnt != 0) {
- std::uint_fast8_t index = 0;
- if (transition_types_[0].is_dst) {
- index = transitions_[0].type_index;
- while (index != 0 && transition_types_[index].is_dst) --index;
- }
- while (index != hdr.typecnt && transition_types_[index].is_dst) ++index;
- if (index != hdr.typecnt) default_transition_type_ = index;
- }
-
- // Copy all the abbreviations.
- abbreviations_.reserve(hdr.charcnt + 10);
- abbreviations_.assign(bp, hdr.charcnt);
- bp += hdr.charcnt;
-
- // Skip the unused portions. We've already dispensed with leap-second
- // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when
- // interpreting a POSIX spec that does not include start/end rules, and
- // that isn't the case here (see "zic -p").
- bp += (time_len + 4) * hdr.leapcnt; // leap-time + TAI-UTC
- bp += 1 * hdr.ttisstdcnt; // UTC/local indicators
- bp += 1 * hdr.ttisutcnt; // standard/wall indicators
- assert(bp == tbuf.data() + tbuf.size());
-
- future_spec_.clear();
- if (tzh.tzh_version[0] != '\0') {
- // Snarf up the NL-enclosed future POSIX spec. Note
- // that version '3' files utilize an extended format.
- auto get_char = [](ZoneInfoSource* azip) -> int {
- unsigned char ch; // all non-EOF results are positive
- return (azip->Read(&ch, 1) == 1) ? ch : EOF;
- };
- if (get_char(zip) != '\n') return false;
- for (int c = get_char(zip); c != '\n'; c = get_char(zip)) {
- if (c == EOF) return false;
- future_spec_.push_back(static_cast<char>(c));
- }
- }
-
- // We don't check for EOF so that we're forwards compatible.
-
- // If we did not find version information during the standard loading
- // process (as of tzh_version '3' that is unsupported), then ask the
- // ZoneInfoSource for any out-of-bound version string it may be privy to.
- if (version_.empty()) {
- version_ = zip->Version();
- }
-
- // Trim redundant transitions. zic may have added these to work around
- // differences between the glibc and reference implementations (see
- // zic.c:dontmerge) or to avoid bugs in old readers. For us, they just
- // get in the way when we do future_spec_ extension.
- while (hdr.timecnt > 1) {
- if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index,
- transitions_[hdr.timecnt - 2].type_index)) {
- break;
- }
- hdr.timecnt -= 1;
- }
- transitions_.resize(hdr.timecnt);
-
- // Ensure that there is always a transition in the first half of the
- // time line (the second half is handled below) so that the signed
- // difference between a civil_second and the civil_second of its
- // previous transition is always representable, without overflow.
- if (transitions_.empty() || transitions_.front().unix_time >= 0) {
- Transition& tr(*transitions_.emplace(transitions_.begin()));
- tr.unix_time = -(1LL << 59); // -18267312070-10-26T17:01:52+00:00
- tr.type_index = default_transition_type_;
- }
-
- // Extend the transitions using the future specification.
- if (!ExtendTransitions()) return false;
-
- // Ensure that there is always a transition in the second half of the
- // time line (the first half is handled above) so that the signed
- // difference between a civil_second and the civil_second of its
- // previous transition is always representable, without overflow.
- const Transition& last(transitions_.back());
- if (last.unix_time < 0) {
- const std::uint_fast8_t type_index = last.type_index;
- Transition& tr(*transitions_.emplace(transitions_.end()));
- tr.unix_time = 2147483647; // 2038-01-19T03:14:07+00:00
- tr.type_index = type_index;
- }
-
- // Compute the local civil time for each transition and the preceding
- // second. These will be used for reverse conversions in MakeTime().
- const TransitionType* ttp = &transition_types_[default_transition_type_];
- for (std::size_t i = 0; i != transitions_.size(); ++i) {
- Transition& tr(transitions_[i]);
- tr.prev_civil_sec = LocalTime(tr.unix_time, *ttp).cs - 1;
- ttp = &transition_types_[tr.type_index];
- tr.civil_sec = LocalTime(tr.unix_time, *ttp).cs;
- if (i != 0) {
- // Check that the transitions are ordered by civil time. Essentially
- // this means that an offset change cannot cross another such change.
- // No one does this in practice, and we depend on it in MakeTime().
- if (!Transition::ByCivilTime()(transitions_[i - 1], tr))
- return false; // out of order
- }
- }
-
- // Compute the maximum/minimum civil times that can be converted to a
- // time_point<seconds> for each of the zone's transition types.
- for (auto& tt : transition_types_) {
- tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
- tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
- }
-
- transitions_.shrink_to_fit();
- return true;
-}
-
namespace {
using FilePtr = std::unique_ptr<FILE, int (*)(FILE*)>;
@@ -795,6 +576,240 @@ std::unique_ptr<ZoneInfoSource> FuchsiaZoneInfoSource::Open(
} // namespace
+// What (no leap-seconds) UTC+seconds zoneinfo would look like.
+bool TimeZoneInfo::ResetToBuiltinUTC(const seconds& offset) {
+ transition_types_.resize(1);
+ TransitionType& tt(transition_types_.back());
+ tt.utc_offset = static_cast<std::int_least32_t>(offset.count());
+ tt.is_dst = false;
+ tt.abbr_index = 0;
+
+ // We temporarily add some redundant, contemporary (2015 through 2025)
+ // transitions for performance reasons. See TimeZoneInfo::LocalTime().
+ // TODO: Fix the performance issue and remove the extra transitions.
+ transitions_.clear();
+ transitions_.reserve(12);
+ for (const std::int_fast64_t unix_time : {
+ -(1LL << 59), // a "first half" transition
+ 1420070400LL, // 2015-01-01T00:00:00+00:00
+ 1451606400LL, // 2016-01-01T00:00:00+00:00
+ 1483228800LL, // 2017-01-01T00:00:00+00:00
+ 1514764800LL, // 2018-01-01T00:00:00+00:00
+ 1546300800LL, // 2019-01-01T00:00:00+00:00
+ 1577836800LL, // 2020-01-01T00:00:00+00:00
+ 1609459200LL, // 2021-01-01T00:00:00+00:00
+ 1640995200LL, // 2022-01-01T00:00:00+00:00
+ 1672531200LL, // 2023-01-01T00:00:00+00:00
+ 1704067200LL, // 2024-01-01T00:00:00+00:00
+ 1735689600LL, // 2025-01-01T00:00:00+00:00
+ }) {
+ Transition& tr(*transitions_.emplace(transitions_.end()));
+ tr.unix_time = unix_time;
+ tr.type_index = 0;
+ tr.civil_sec = LocalTime(tr.unix_time, tt).cs;
+ tr.prev_civil_sec = tr.civil_sec - 1;
+ }
+
+ default_transition_type_ = 0;
+ abbreviations_ = FixedOffsetToAbbr(offset);
+ abbreviations_.append(1, '\0');
+ future_spec_.clear(); // never needed for a fixed-offset zone
+ extended_ = false;
+
+ tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
+ tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
+
+ transitions_.shrink_to_fit();
+ return true;
+}
+
+bool TimeZoneInfo::Load(ZoneInfoSource* zip) {
+ // Read and validate the header.
+ tzhead tzh;
+ if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
+ if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
+ return false;
+ Header hdr;
+ if (!hdr.Build(tzh)) return false;
+ std::size_t time_len = 4;
+ if (tzh.tzh_version[0] != '\0') {
+ // Skip the 4-byte data.
+ if (zip->Skip(hdr.DataLength(time_len)) != 0) return false;
+ // Read and validate the header for the 8-byte data.
+ if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
+ if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
+ return false;
+ if (tzh.tzh_version[0] == '\0') return false;
+ if (!hdr.Build(tzh)) return false;
+ time_len = 8;
+ }
+ if (hdr.typecnt == 0) return false;
+ if (hdr.leapcnt != 0) {
+ // This code assumes 60-second minutes so we do not want
+ // the leap-second encoded zoneinfo. We could reverse the
+ // compensation, but the "right" encoding is rarely used
+ // so currently we simply reject such data.
+ return false;
+ }
+ if (hdr.ttisstdcnt != 0 && hdr.ttisstdcnt != hdr.typecnt) return false;
+ if (hdr.ttisutcnt != 0 && hdr.ttisutcnt != hdr.typecnt) return false;
+
+ // Read the data into a local buffer.
+ std::size_t len = hdr.DataLength(time_len);
+ std::vector<char> tbuf(len);
+ if (zip->Read(tbuf.data(), len) != len) return false;
+ const char* bp = tbuf.data();
+
+ // Decode and validate the transitions.
+ transitions_.reserve(hdr.timecnt + 2);
+ transitions_.resize(hdr.timecnt);
+ for (std::size_t i = 0; i != hdr.timecnt; ++i) {
+ transitions_[i].unix_time = (time_len == 4) ? Decode32(bp) : Decode64(bp);
+ bp += time_len;
+ if (i != 0) {
+ // Check that the transitions are ordered by time (as zic guarantees).
+ if (!Transition::ByUnixTime()(transitions_[i - 1], transitions_[i]))
+ return false; // out of order
+ }
+ }
+ bool seen_type_0 = false;
+ for (std::size_t i = 0; i != hdr.timecnt; ++i) {
+ transitions_[i].type_index = Decode8(bp++);
+ if (transitions_[i].type_index >= hdr.typecnt) return false;
+ if (transitions_[i].type_index == 0) seen_type_0 = true;
+ }
+
+ // Decode and validate the transition types.
+ transition_types_.reserve(hdr.typecnt + 2);
+ transition_types_.resize(hdr.typecnt);
+ for (std::size_t i = 0; i != hdr.typecnt; ++i) {
+ transition_types_[i].utc_offset =
+ static_cast<std::int_least32_t>(Decode32(bp));
+ if (transition_types_[i].utc_offset >= kSecsPerDay ||
+ transition_types_[i].utc_offset <= -kSecsPerDay)
+ return false;
+ bp += 4;
+ transition_types_[i].is_dst = (Decode8(bp++) != 0);
+ transition_types_[i].abbr_index = Decode8(bp++);
+ if (transition_types_[i].abbr_index >= hdr.charcnt) return false;
+ }
+
+ // Determine the before-first-transition type.
+ default_transition_type_ = 0;
+ if (seen_type_0 && hdr.timecnt != 0) {
+ std::uint_fast8_t index = 0;
+ if (transition_types_[0].is_dst) {
+ index = transitions_[0].type_index;
+ while (index != 0 && transition_types_[index].is_dst) --index;
+ }
+ while (index != hdr.typecnt && transition_types_[index].is_dst) ++index;
+ if (index != hdr.typecnt) default_transition_type_ = index;
+ }
+
+ // Copy all the abbreviations.
+ abbreviations_.reserve(hdr.charcnt + 10);
+ abbreviations_.assign(bp, hdr.charcnt);
+ bp += hdr.charcnt;
+
+ // Skip the unused portions. We've already dispensed with leap-second
+ // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when
+ // interpreting a POSIX spec that does not include start/end rules, and
+ // that isn't the case here (see "zic -p").
+ bp += (time_len + 4) * hdr.leapcnt; // leap-time + TAI-UTC
+ bp += 1 * hdr.ttisstdcnt; // standard/wall indicators
+ bp += 1 * hdr.ttisutcnt; // UTC/local indicators
+ assert(bp == tbuf.data() + tbuf.size());
+
+ future_spec_.clear();
+ if (tzh.tzh_version[0] != '\0') {
+ // Snarf up the NL-enclosed future POSIX spec. Note
+ // that version '3' files utilize an extended format.
+ auto get_char = [](ZoneInfoSource* azip) -> int {
+ unsigned char ch; // all non-EOF results are positive
+ return (azip->Read(&ch, 1) == 1) ? ch : EOF;
+ };
+ if (get_char(zip) != '\n') return false;
+ for (int c = get_char(zip); c != '\n'; c = get_char(zip)) {
+ if (c == EOF) return false;
+ future_spec_.push_back(static_cast<char>(c));
+ }
+ }
+
+ // We don't check for EOF so that we're forwards compatible.
+
+ // If we did not find version information during the standard loading
+ // process (as of tzh_version '3' that is unsupported), then ask the
+ // ZoneInfoSource for any out-of-bound version string it may be privy to.
+ if (version_.empty()) {
+ version_ = zip->Version();
+ }
+
+ // Trim redundant transitions. zic may have added these to work around
+ // differences between the glibc and reference implementations (see
+ // zic.c:dontmerge) or to avoid bugs in old readers. For us, they just
+ // get in the way when we do future_spec_ extension.
+ while (hdr.timecnt > 1) {
+ if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index,
+ transitions_[hdr.timecnt - 2].type_index)) {
+ break;
+ }
+ hdr.timecnt -= 1;
+ }
+ transitions_.resize(hdr.timecnt);
+
+ // Ensure that there is always a transition in the first half of the
+ // time line (the second half is handled below) so that the signed
+ // difference between a civil_second and the civil_second of its
+ // previous transition is always representable, without overflow.
+ if (transitions_.empty() || transitions_.front().unix_time >= 0) {
+ Transition& tr(*transitions_.emplace(transitions_.begin()));
+ tr.unix_time = -(1LL << 59); // -18267312070-10-26T17:01:52+00:00
+ tr.type_index = default_transition_type_;
+ }
+
+ // Extend the transitions using the future specification.
+ if (!ExtendTransitions()) return false;
+
+ // Ensure that there is always a transition in the second half of the
+ // time line (the first half is handled above) so that the signed
+ // difference between a civil_second and the civil_second of its
+ // previous transition is always representable, without overflow.
+ const Transition& last(transitions_.back());
+ if (last.unix_time < 0) {
+ const std::uint_fast8_t type_index = last.type_index;
+ Transition& tr(*transitions_.emplace(transitions_.end()));
+ tr.unix_time = 2147483647; // 2038-01-19T03:14:07+00:00
+ tr.type_index = type_index;
+ }
+
+ // Compute the local civil time for each transition and the preceding
+ // second. These will be used for reverse conversions in MakeTime().
+ const TransitionType* ttp = &transition_types_[default_transition_type_];
+ for (std::size_t i = 0; i != transitions_.size(); ++i) {
+ Transition& tr(transitions_[i]);
+ tr.prev_civil_sec = LocalTime(tr.unix_time, *ttp).cs - 1;
+ ttp = &transition_types_[tr.type_index];
+ tr.civil_sec = LocalTime(tr.unix_time, *ttp).cs;
+ if (i != 0) {
+ // Check that the transitions are ordered by civil time. Essentially
+ // this means that an offset change cannot cross another such change.
+ // No one does this in practice, and we depend on it in MakeTime().
+ if (!Transition::ByCivilTime()(transitions_[i - 1], tr))
+ return false; // out of order
+ }
+ }
+
+ // Compute the maximum/minimum civil times that can be converted to a
+ // time_point<seconds> for each of the zone's transition types.
+ for (auto& tt : transition_types_) {
+ tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
+ tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
+ }
+
+ transitions_.shrink_to_fit();
+ return true;
+}
+
bool TimeZoneInfo::Load(const TString& name) {
// We can ensure that the loading of UTC or any other fixed-offset
// zone never fails because the simple, fixed-offset state can be
@@ -816,6 +831,18 @@ bool TimeZoneInfo::Load(const TString& name) {
return zip != nullptr && Load(zip.get());
}
+std::unique_ptr<TimeZoneInfo> TimeZoneInfo::UTC() {
+ auto tz = std::unique_ptr<TimeZoneInfo>(new TimeZoneInfo);
+ tz->ResetToBuiltinUTC(seconds::zero());
+ return tz;
+}
+
+std::unique_ptr<TimeZoneInfo> TimeZoneInfo::Make(const TString& name) {
+ auto tz = std::unique_ptr<TimeZoneInfo>(new TimeZoneInfo);
+ if (!tz->Load(name)) tz.reset(); // fallback to UTC
+ return tz;
+}
+
// BreakTime() translation for a particular transition type.
time_zone::absolute_lookup TimeZoneInfo::LocalTime(
std::int_fast64_t unix_time, const TransitionType& tt) const {
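
The two factories differ in their failure contract: UTC() always yields an
object, while Make() may return nullptr, leaving the fallback decision to the
caller. A hypothetical caller-side sketch of that contract:

    #include <memory>
    #include <utility>

    struct ZoneImpl {};  // hypothetical stand-in

    std::unique_ptr<ZoneImpl> MakeZoneImpl(const char* /*name*/) {
      return nullptr;  // stand-in for a failed load
    }
    std::unique_ptr<ZoneImpl> UTCZoneImpl() {
      return std::unique_ptr<ZoneImpl>(new ZoneImpl);  // never fails
    }

    std::unique_ptr<ZoneImpl> LoadOrUTC(const char* name) {
      auto z = MakeZoneImpl(name);
      return z ? std::move(z) : UTCZoneImpl();  // fall back on failure
    }
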
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h
index 52948a362d..a9717ba117 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_info.h
@@ -18,6 +18,7 @@
#include <atomic>
#include <cstddef>
#include <cstdint>
+#include <memory>
#include <util/generic/string.h>
#include <vector>
@@ -64,12 +65,9 @@ struct TransitionType {
// A time zone backed by the IANA Time Zone Database (zoneinfo).
class TimeZoneInfo : public TimeZoneIf {
public:
- TimeZoneInfo() = default;
- TimeZoneInfo(const TimeZoneInfo&) = delete;
- TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
-
- // Loads the zoneinfo for the given name, returning true if successful.
- bool Load(const TString& name);
+ // Factories.
+ static std::unique_ptr<TimeZoneInfo> UTC(); // never fails
+ static std::unique_ptr<TimeZoneInfo> Make(const TString& name);
// TimeZoneIf implementations.
time_zone::absolute_lookup BreakTime(
@@ -83,17 +81,9 @@ class TimeZoneInfo : public TimeZoneIf {
TString Description() const override;
private:
- struct Header { // counts of:
- std::size_t timecnt; // transition times
- std::size_t typecnt; // transition types
- std::size_t charcnt; // zone abbreviation characters
- std::size_t leapcnt; // leap seconds (we expect none)
- std::size_t ttisstdcnt; // UTC/local indicators (unused)
- std::size_t ttisutcnt; // standard/wall indicators (unused)
-
- bool Build(const tzhead& tzh);
- std::size_t DataLength(std::size_t time_len) const;
- };
+ TimeZoneInfo() = default;
+ TimeZoneInfo(const TimeZoneInfo&) = delete;
+ TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
bool GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
const TString& abbr, std::uint_least8_t* index);
@@ -102,6 +92,7 @@ class TimeZoneInfo : public TimeZoneIf {
bool ExtendTransitions();
bool ResetToBuiltinUTC(const seconds& offset);
+ bool Load(const TString& name);
bool Load(ZoneInfoSource* zip);
// Helpers for BreakTime() and MakeTime().
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc
index 9f713ea5eb..a38a4a092d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.cc
@@ -62,7 +62,7 @@ auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
}
#elif defined(__native_client__) || defined(__myriad2__) || \
defined(__EMSCRIPTEN__)
-// Uses the globals: 'timezone' and 'tzname'.
+// Uses the globals: '_timezone' and 'tzname'.
auto tm_gmtoff(const std::tm& tm) -> decltype(_timezone + 0) {
const bool is_dst = tm.tm_isdst > 0;
return _timezone + (is_dst ? 60 * 60 : 0);
@@ -71,6 +71,16 @@ auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
const bool is_dst = tm.tm_isdst > 0;
return tzname[is_dst];
}
+#elif defined(__VXWORKS__)
+// Uses the globals: 'timezone' and 'tzname'.
+auto tm_gmtoff(const std::tm& tm) -> decltype(timezone + 0) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return timezone + (is_dst ? 60 * 60 : 0);
+}
+auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return tzname[is_dst];
+}
#else
// Adapt to different spellings of the struct std::tm extension fields.
#if defined(tm_gmtoff)
@@ -108,6 +118,7 @@ auto tm_zone(const T& tm) -> decltype(tm.__tm_zone) {
}
#endif // tm_zone
#endif
+using tm_gmtoff_t = decltype(tm_gmtoff(std::tm{}));
inline std::tm* gm_time(const std::time_t* timep, std::tm* result) {
#if defined(_WIN32) || defined(_WIN64)
@@ -125,37 +136,36 @@ inline std::tm* local_time(const std::time_t* timep, std::tm* result) {
#endif
}
-// Converts a civil second and "dst" flag into a time_t and UTC offset.
+// Converts a civil second and "dst" flag into a time_t and a struct tm.
// Returns false if time_t cannot represent the requested civil second.
// Caller must have already checked that cs.year() will fit into a tm_year.
-bool make_time(const civil_second& cs, int is_dst, std::time_t* t, int* off) {
- std::tm tm;
- tm.tm_year = static_cast<int>(cs.year() - year_t{1900});
- tm.tm_mon = cs.month() - 1;
- tm.tm_mday = cs.day();
- tm.tm_hour = cs.hour();
- tm.tm_min = cs.minute();
- tm.tm_sec = cs.second();
- tm.tm_isdst = is_dst;
- *t = std::mktime(&tm);
+bool make_time(const civil_second& cs, int is_dst, std::time_t* t,
+ std::tm* tm) {
+ tm->tm_year = static_cast<int>(cs.year() - year_t{1900});
+ tm->tm_mon = cs.month() - 1;
+ tm->tm_mday = cs.day();
+ tm->tm_hour = cs.hour();
+ tm->tm_min = cs.minute();
+ tm->tm_sec = cs.second();
+ tm->tm_isdst = is_dst;
+ *t = std::mktime(tm);
if (*t == std::time_t{-1}) {
std::tm tm2;
const std::tm* tmp = local_time(t, &tm2);
- if (tmp == nullptr || tmp->tm_year != tm.tm_year ||
- tmp->tm_mon != tm.tm_mon || tmp->tm_mday != tm.tm_mday ||
- tmp->tm_hour != tm.tm_hour || tmp->tm_min != tm.tm_min ||
- tmp->tm_sec != tm.tm_sec) {
+ if (tmp == nullptr || tmp->tm_year != tm->tm_year ||
+ tmp->tm_mon != tm->tm_mon || tmp->tm_mday != tm->tm_mday ||
+ tmp->tm_hour != tm->tm_hour || tmp->tm_min != tm->tm_min ||
+ tmp->tm_sec != tm->tm_sec) {
// A true error (not just one second before the epoch).
return false;
}
}
- *off = static_cast<int>(tm_gmtoff(tm));
return true;
}
// Find the least time_t in [lo:hi] where local time matches offset, given:
// (1) lo doesn't match, (2) hi does, and (3) there is only one transition.
-std::time_t find_trans(std::time_t lo, std::time_t hi, int offset) {
+std::time_t find_trans(std::time_t lo, std::time_t hi, tm_gmtoff_t offset) {
std::tm tm;
while (lo + 1 != hi) {
const std::time_t mid = lo + (hi - lo) / 2;
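
The reworked make_time() hands the whole normalized std::tm back to the caller
so that both tm_isdst and tm_gmtoff can be read after mktime() runs. A
standalone sketch of the double-probe idea (mktime()'s treatment of the
tm_isdst hint is implementation-defined, which is exactly why MakeTime()
probes with both values):

    #include <ctime>

    // Probe a civil time with an explicit tm_isdst hint; mktime() normalizes
    // the fields and rewrites tm_isdst with its own determination.
    std::time_t ProbeCivilTime(std::tm civil, int is_dst_hint,
                               std::tm* normalized) {
      civil.tm_isdst = is_dst_hint;
      const std::time_t t = std::mktime(&civil);  // (std::time_t)-1 on failure
      *normalized = civil;
      return t;
    }
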
@@ -183,8 +193,9 @@ std::time_t find_trans(std::time_t lo, std::time_t hi, int offset) {
} // namespace
-TimeZoneLibC::TimeZoneLibC(const TString& name)
- : local_(name == "localtime") {}
+std::unique_ptr<TimeZoneLibC> TimeZoneLibC::Make(const TString& name) {
+ return std::unique_ptr<TimeZoneLibC>(new TimeZoneLibC(name));
+}
time_zone::absolute_lookup TimeZoneLibC::BreakTime(
const time_point<seconds>& tp) const {
@@ -254,33 +265,37 @@ time_zone::civil_lookup TimeZoneLibC::MakeTime(const civil_second& cs) const {
// We probe with "is_dst" values of 0 and 1 to try to distinguish unique
// civil seconds from skipped or repeated ones. This is not always possible
// however, as the "dst" flag does not change over some offset transitions.
- // We are also subject to the vagaries of mktime() implementations.
+ // We are also subject to the vagaries of mktime() implementations. For
+ // example, some implementations treat "tm_isdst" as a demand (useless),
+ // and some as a disambiguator (useful).
std::time_t t0, t1;
- int offset0, offset1;
- if (make_time(cs, 0, &t0, &offset0) && make_time(cs, 1, &t1, &offset1)) {
- if (t0 == t1) {
+ std::tm tm0, tm1;
+ if (make_time(cs, 0, &t0, &tm0) && make_time(cs, 1, &t1, &tm1)) {
+ if (tm0.tm_isdst == tm1.tm_isdst) {
// The civil time was singular (pre == trans == post).
- const time_point<seconds> tp = FromUnixSeconds(t0);
+ const time_point<seconds> tp = FromUnixSeconds(tm0.tm_isdst ? t1 : t0);
return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
}
- if (t0 > t1) {
+ tm_gmtoff_t offset = tm_gmtoff(tm0);
+ if (t0 < t1) { // negative DST
std::swap(t0, t1);
- std::swap(offset0, offset1);
+ offset = tm_gmtoff(tm1);
}
- const std::time_t tt = find_trans(t0, t1, offset1);
+
+ const std::time_t tt = find_trans(t1, t0, offset);
const time_point<seconds> trans = FromUnixSeconds(tt);
- if (offset0 < offset1) {
+ if (tm0.tm_isdst) {
// The civil time did not exist (pre >= trans > post).
- const time_point<seconds> pre = FromUnixSeconds(t1);
- const time_point<seconds> post = FromUnixSeconds(t0);
+ const time_point<seconds> pre = FromUnixSeconds(t0);
+ const time_point<seconds> post = FromUnixSeconds(t1);
return {time_zone::civil_lookup::SKIPPED, pre, trans, post};
}
// The civil time was ambiguous (pre < trans <= post).
- const time_point<seconds> pre = FromUnixSeconds(t0);
- const time_point<seconds> post = FromUnixSeconds(t1);
+ const time_point<seconds> pre = FromUnixSeconds(t1);
+ const time_point<seconds> post = FromUnixSeconds(t0);
return {time_zone::civil_lookup::REPEATED, pre, trans, post};
}
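
For example, at a typical fall-back transition a civil time such as 01:30
occurs twice: the is_dst=1 probe lands on the earlier (DST) instant, the
is_dst=0 probe on the later (standard) one, the normalized tm_isdst values
disagree, and the lookup is classified REPEATED with pre taken from the
earlier reading and post from the later one.
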
@@ -309,6 +324,9 @@ TString TimeZoneLibC::Description() const {
return local_ ? "localtime" : "UTC";
}
+TimeZoneLibC::TimeZoneLibC(const TString& name)
+ : local_(name == "localtime") {}
+
} // namespace cctz
} // namespace time_internal
Y_ABSL_NAMESPACE_END
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h
index 1360a366c3..f4d366ac2c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_libc.h
@@ -15,6 +15,7 @@
#ifndef Y_ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
#define Y_ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
+#include <memory>
#include <util/generic/string.h>
#include "y_absl/base/config.h"
@@ -27,10 +28,10 @@ namespace cctz {
// A time zone backed by gmtime_r(3), localtime_r(3), and mktime(3),
// and which therefore only supports UTC and the local time zone.
-// TODO: Add support for fixed offsets from UTC.
class TimeZoneLibC : public TimeZoneIf {
public:
- explicit TimeZoneLibC(const TString& name);
+ // Factory.
+ static std::unique_ptr<TimeZoneLibC> Make(const TString& name);
// TimeZoneIf implementations.
time_zone::absolute_lookup BreakTime(
@@ -44,6 +45,10 @@ class TimeZoneLibC : public TimeZoneIf {
TString Description() const override;
private:
+ explicit TimeZoneLibC(const TString& name);
+ TimeZoneLibC(const TimeZoneLibC&) = delete;
+ TimeZoneLibC& operator=(const TimeZoneLibC&) = delete;
+
const bool local_; // localtime or UTC
};
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc
index cd2490c91c..dc495d0606 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -35,6 +35,24 @@
#error #include <zircon/types.h>
#endif
+#if defined(_WIN32)
+#include <sdkddkver.h>
+// Include only when the SDK is for Windows 10 (and later), and the binary is
+// targeted for Windows XP and later.
+// Note: The Windows SDK added the windows.globalization.h file for Windows 10, but
+// MinGW did not add it until NTDDI_WIN10_NI (SDK version 10.0.22621.0).
+#if ((defined(_WIN32_WINNT_WIN10) && !defined(__MINGW32__)) || \
+ (defined(NTDDI_WIN10_NI) && NTDDI_VERSION >= NTDDI_WIN10_NI)) && \
+ (_WIN32_WINNT >= _WIN32_WINNT_WINXP)
+#define USE_WIN32_LOCAL_TIME_ZONE
+#include <roapi.h>
+#include <tchar.h>
+#include <wchar.h>
+#include <windows.globalization.h>
+#include <windows.h>
+#endif
+#endif
+
#include <cstdlib>
#include <cstring>
#include <util/generic/string.h>
@@ -47,8 +65,8 @@ Y_ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
-#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
namespace {
+#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
// Android 'L' removes __system_property_get() from the NDK, however
// it is still a hidden symbol in libc so we use dlsym() to access it.
// See Chromium's base/sys_info_android.cc for a similar example.
@@ -72,9 +90,84 @@ int __system_property_get(const char* name, char* value) {
static property_get_func system_property_get = LoadSystemPropertyGet();
return system_property_get ? system_property_get(name, value) : -1;
}
+#endif
-} // namespace
+#if defined(USE_WIN32_LOCAL_TIME_ZONE)
+// Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
+// local time zone. Returns an empty string in case of an error.
+TString win32_local_time_zone(const HMODULE combase) {
+ TString result;
+ const auto ro_activate_instance =
+ reinterpret_cast<decltype(&RoActivateInstance)>(
+ GetProcAddress(combase, "RoActivateInstance"));
+ if (!ro_activate_instance) {
+ return result;
+ }
+ const auto windows_create_string_reference =
+ reinterpret_cast<decltype(&WindowsCreateStringReference)>(
+ GetProcAddress(combase, "WindowsCreateStringReference"));
+ if (!windows_create_string_reference) {
+ return result;
+ }
+ const auto windows_delete_string =
+ reinterpret_cast<decltype(&WindowsDeleteString)>(
+ GetProcAddress(combase, "WindowsDeleteString"));
+ if (!windows_delete_string) {
+ return result;
+ }
+ const auto windows_get_string_raw_buffer =
+ reinterpret_cast<decltype(&WindowsGetStringRawBuffer)>(
+ GetProcAddress(combase, "WindowsGetStringRawBuffer"));
+ if (!windows_get_string_raw_buffer) {
+ return result;
+ }
+
+ // The string returned by WindowsCreateStringReference doesn't need to be
+ // deleted.
+ HSTRING calendar_class_id;
+ HSTRING_HEADER calendar_class_id_header;
+ HRESULT hr = windows_create_string_reference(
+ RuntimeClass_Windows_Globalization_Calendar,
+ sizeof(RuntimeClass_Windows_Globalization_Calendar) / sizeof(wchar_t) - 1,
+ &calendar_class_id_header, &calendar_class_id);
+ if (FAILED(hr)) {
+ return result;
+ }
+
+ IInspectable* calendar;
+ hr = ro_activate_instance(calendar_class_id, &calendar);
+ if (FAILED(hr)) {
+ return result;
+ }
+
+ ABI::Windows::Globalization::ITimeZoneOnCalendar* time_zone;
+ hr = calendar->QueryInterface(IID_PPV_ARGS(&time_zone));
+ if (FAILED(hr)) {
+ calendar->Release();
+ return result;
+ }
+
+ HSTRING tz_hstr;
+ hr = time_zone->GetTimeZone(&tz_hstr);
+ if (SUCCEEDED(hr)) {
+ UINT32 wlen;
+ const PCWSTR tz_wstr = windows_get_string_raw_buffer(tz_hstr, &wlen);
+ if (tz_wstr) {
+ const int size =
+ WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
+ nullptr, 0, nullptr, nullptr);
+ result.resize(static_cast<size_t>(size));
+ WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
+ &result[0], size, nullptr, nullptr);
+ }
+ windows_delete_string(tz_hstr);
+ }
+ time_zone->Release();
+ calendar->Release();
+ return result;
+}
#endif
+} // namespace
TString time_zone::name() const { return effective_impl().Name(); }
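
win32_local_time_zone() binds every WinRT entry point at runtime instead of
linking against an import library, which keeps the binary loadable on systems
without WinRT. A minimal sketch of that pattern; as in the code above, the
pointer type is spelled with decltype of the declared API so no signature is
restated by hand:

    #include <windows.h>
    #include <roapi.h>

    // Resolve RoInitialize dynamically; returns nullptr where it is absent.
    auto LoadRoInitialize(HMODULE combase) -> decltype(&::RoInitialize) {
      return reinterpret_cast<decltype(&::RoInitialize)>(
          GetProcAddress(combase, "RoInitialize"));
    }
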
@@ -190,6 +283,39 @@ time_zone local_time_zone() {
zone = primary_tz.c_str();
}
#endif
+#if defined(USE_WIN32_LOCAL_TIME_ZONE)
+ // Use the WinRT Calendar class to get the local time zone. This feature is
+ // available on Windows 10 and later. The library is dynamically linked to
+ // maintain binary compatibility with Windows XP - Windows 7. On Windows 8,
+ // the combase.dll API functions are available but the RoActivateInstance
+ // call will fail for the Calendar class.
+ TString winrt_tz;
+ const HMODULE combase =
+ LoadLibraryEx(_T("combase.dll"), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+ if (combase) {
+ const auto ro_initialize = reinterpret_cast<decltype(&::RoInitialize)>(
+ GetProcAddress(combase, "RoInitialize"));
+ const auto ro_uninitialize = reinterpret_cast<decltype(&::RoUninitialize)>(
+ GetProcAddress(combase, "RoUninitialize"));
+ if (ro_initialize && ro_uninitialize) {
+ const HRESULT hr = ro_initialize(RO_INIT_MULTITHREADED);
+ // RPC_E_CHANGED_MODE means that a previous RoInitialize call specified
+ // a different concurrency model. The WinRT runtime is initialized and
+ // should work for our purpose here, but we should *not* call
+ // RoUninitialize because it's a failure.
+ if (SUCCEEDED(hr) || hr == RPC_E_CHANGED_MODE) {
+ winrt_tz = win32_local_time_zone(combase);
+ if (SUCCEEDED(hr)) {
+ ro_uninitialize();
+ }
+ }
+ }
+ FreeLibrary(combase);
+ }
+ if (!winrt_tz.empty()) {
+ zone = winrt_tz.c_str();
+ }
+#endif
// Allow ${TZ} to override to default zone.
char* tz_env = nullptr;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h
index 3eaa5384c7..bdf3d82cf2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/time_zone_posix.h
@@ -104,7 +104,7 @@ struct PosixTransition {
// The entirety of a POSIX-string specified time-zone rule. The standard
// abbreviation and offset are always given. If the time zone includes
-// daylight saving, then the daylight abbrevation is non-empty and the
+// daylight saving, then the daylight abbreviation is non-empty and the
// remaining fields are also valid. Note that the start/end transitions
// are not ordered---in the southern hemisphere the transition to end
// daylight time occurs first in any particular year.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h
index 31e8598257..9613055db7 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/tzfile.h
@@ -102,20 +102,24 @@ struct tzhead {
*/
#ifndef TZ_MAX_TIMES
+/* This must be at least 242 for Europe/London with 'zic -b fat'. */
#define TZ_MAX_TIMES 2000
#endif /* !defined TZ_MAX_TIMES */
#ifndef TZ_MAX_TYPES
-/* This must be at least 17 for Europe/Samara and Europe/Vilnius. */
+/* This must be at least 18 for Europe/Vilnius with 'zic -b fat'. */
#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
#endif /* !defined TZ_MAX_TYPES */
#ifndef TZ_MAX_CHARS
+/* This must be at least 40 for America/Anchorage. */
#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
/* (limited by what unsigned chars can hold) */
#endif /* !defined TZ_MAX_CHARS */
#ifndef TZ_MAX_LEAPS
+/* This must be at least 27 for leap seconds from 1972 through mid-2023.
+ There's a plan to discontinue leap seconds by 2035. */
#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
#endif /* !defined TZ_MAX_LEAPS */
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc
index 098283a681..2d0f04bf48 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/internal/cctz/src/zone_info_source.cc
@@ -67,41 +67,41 @@ extern ZoneInfoSourceFactory zone_info_source_factory;
extern ZoneInfoSourceFactory default_factory;
ZoneInfoSourceFactory default_factory = DefaultFactory;
#if defined(_M_IX86) || defined(_M_ARM)
-#pragma comment( \
- linker, \
- "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZA=?default_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZA")
+#pragma comment( \
+ linker, \
+ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZA=?default_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZA")
#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM64)
-#pragma comment( \
- linker, \
- "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZEA=?default_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZEA")
+#pragma comment( \
+ linker, \
+ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZEA=?default_factory@cctz_extension@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" Y_ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" Y_ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZEA")
#else
#error Unsupported MSVC platform
#endif // _M_<PLATFORM>
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc
index 5664a4b4c1..9c6691c5fa 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.cc
@@ -66,6 +66,7 @@ inline int64_t FloorToUnit(y_absl::Duration d, y_absl::Duration unit) {
: q - 1;
}
+Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
inline y_absl::Time::Breakdown InfiniteFutureBreakdown() {
y_absl::Time::Breakdown bd;
bd.year = std::numeric_limits<int64_t>::max();
@@ -99,6 +100,7 @@ inline y_absl::Time::Breakdown InfinitePastBreakdown() {
bd.zone_abbr = "-00";
return bd;
}
+Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
inline y_absl::TimeZone::CivilInfo InfiniteFutureCivilInfo() {
TimeZone::CivilInfo ci;
@@ -120,6 +122,7 @@ inline y_absl::TimeZone::CivilInfo InfinitePastCivilInfo() {
return ci;
}
+Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
inline y_absl::TimeConversion InfiniteFutureTimeConversion() {
y_absl::TimeConversion tc;
tc.pre = tc.trans = tc.post = y_absl::InfiniteFuture();
@@ -135,9 +138,10 @@ inline TimeConversion InfinitePastTimeConversion() {
tc.normalized = true;
return tc;
}
+Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// Makes a Time from sec, overflowing to InfiniteFuture/InfinitePast as
-// necessary. If sec is min/max, then consult cs+tz to check for overlow.
+// necessary. If sec is min/max, then consult cs+tz to check for overflow.
Time MakeTimeWithOverflow(const cctz::time_point<cctz::seconds>& sec,
const cctz::civil_second& cs,
const cctz::time_zone& tz,
@@ -203,6 +207,7 @@ bool FindTransition(const cctz::time_zone& tz,
// Time
//
+Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
y_absl::Time::Breakdown Time::In(y_absl::TimeZone tz) const {
if (*this == y_absl::InfiniteFuture()) return InfiniteFutureBreakdown();
if (*this == y_absl::InfinitePast()) return InfinitePastBreakdown();
@@ -227,6 +232,7 @@ y_absl::Time::Breakdown Time::In(y_absl::TimeZone tz) const {
bd.zone_abbr = al.abbr;
return bd;
}
+Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
//
// Conversions from/to other time types.
@@ -398,7 +404,7 @@ bool TimeZone::PrevTransition(Time t, CivilTransition* trans) const {
//
// Conversions involving time zones.
//
-
+Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
y_absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
int min, int sec, TimeZone tz) {
// Avoids years that are too extreme for CivilSecond to normalize.
@@ -430,6 +436,7 @@ y_absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
}
return tc;
}
+Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
y_absl::Time FromTM(const struct tm& tm, y_absl::TimeZone tz) {
civil_year_t tm_year = tm.tm_year;
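The DISABLE/RESTORE pairs added throughout time.cc bracket exactly the definitions that must still name deprecated types. A minimal sketch of the pattern with illustrative names (the internal macros expand to compiler-specific deprecated-declaration warning suppression):

    #include "y_absl/base/attributes.h"
    #include "y_absl/base/config.h"

    struct Y_ABSL_DEPRECATED("Use NewThing.") OldThing { int v; };

    Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
    // This definition must still mention OldThing, so the warning is
    // silenced for exactly this region...
    OldThing MakeOldThing() { return OldThing{42}; }
    Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
    // ...while callers elsewhere still see the deprecation warning.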
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
index affcd18c88..9d68396d5d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/time/time.h
@@ -84,6 +84,7 @@ struct timeval;
#include <type_traits>
#include <utility>
+#include "y_absl/base/config.h"
#include "y_absl/base/macros.h"
#include "y_absl/strings/string_view.h"
#include "y_absl/time/civil_time.h"
@@ -187,7 +188,12 @@ class Duration {
Duration& operator%=(Duration rhs);
// Overloads that forward to either the int64_t or double overloads above.
- // Integer operands must be representable as int64_t.
+ // Integer operands must be representable as int64_t. Integer division is
+ // truncating, so values less than the resolution will be returned as zero.
+ // Floating-point multiplication and division is rounding (halfway cases
+ // rounding away from zero), so values less than the resolution may be
+ // returned as either the resolution or zero. In particular, `d / 2.0`
+ // can produce `d` when it is the resolution and "even".
template <typename T, time_internal::EnableIfIntegral<T> = 0>
Duration& operator*=(T r) {
int64_t x = r;
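A worked sketch of the truncation/rounding contract described in the comment above, assuming y_absl's quarter-nanosecond tick resolution (1ns == 4 ticks); names and values are illustrative:

    #include "y_absl/time/time.h"

    void DurationRoundingSketch() {
      // Integer operands truncate: 1ns is 4 ticks, and 4 / 5 truncates to 0.
      y_absl::Duration a = y_absl::Nanoseconds(1) / 5;    // == y_absl::ZeroDuration()
      // Floating-point operands round, halfway cases away from zero:
      // 4 / 5.0 == 0.8 ticks, which rounds to 1 tick (the resolution).
      y_absl::Duration b = y_absl::Nanoseconds(1) / 5.0;  // == one tick, not zero
      // The "even" halfway case: when d is exactly one tick, d / 2.0 is
      // 0.5 ticks and rounds away from zero, reproducing d.
      (void)a; (void)b;
    }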
@@ -214,7 +220,7 @@ class Duration {
template <typename H>
friend H AbslHashValue(H h, Duration d) {
- return H::combine(std::move(h), d.rep_hi_, d.rep_lo_);
+ return H::combine(std::move(h), d.rep_hi_.Get(), d.rep_lo_);
}
private:
@@ -223,7 +229,79 @@ class Duration {
friend constexpr Duration time_internal::MakeDuration(int64_t hi,
uint32_t lo);
constexpr Duration(int64_t hi, uint32_t lo) : rep_hi_(hi), rep_lo_(lo) {}
- int64_t rep_hi_;
+
+ // We store `rep_hi_` 4-byte rather than 8-byte aligned to avoid 4 bytes of
+ // tail padding.
+ class HiRep {
+ public:
+ // Default constructor default-initializes `hi_` and `lo_`, which has the
+ // same semantics as default-initializing an `int64_t` (undetermined value).
+ HiRep() = default;
+
+ HiRep(const HiRep&) = default;
+ HiRep& operator=(const HiRep&) = default;
+
+ explicit constexpr HiRep(const int64_t value)
+ : // C++17 forbids default-initialization in constexpr contexts. We can
+ // remove this in C++20.
+#if defined(Y_ABSL_IS_BIG_ENDIAN) && Y_ABSL_IS_BIG_ENDIAN
+ hi_(0),
+ lo_(0)
+#else
+ lo_(0),
+ hi_(0)
+#endif
+ {
+ *this = value;
+ }
+
+ constexpr int64_t Get() const {
+ const uint64_t unsigned_value =
+ (static_cast<uint64_t>(hi_) << 32) | static_cast<uint64_t>(lo_);
+ // `static_cast<int64_t>(unsigned_value)` is implementation-defined
+ // before c++20. On all supported platforms the behaviour is that mandated
+ // by c++20, i.e. "If the destination type is signed, [...] the result is
+ // the unique value of the destination type equal to the source value
+ // modulo 2^n, where n is the number of bits used to represent the
+ // destination type."
+ static_assert(
+ (static_cast<int64_t>((std::numeric_limits<uint64_t>::max)()) ==
+ int64_t{-1}) &&
+ (static_cast<int64_t>(static_cast<uint64_t>(
+ (std::numeric_limits<int64_t>::max)()) +
+ 1) ==
+ (std::numeric_limits<int64_t>::min)()),
+ "static_cast<int64_t>(uint64_t) does not have c++20 semantics");
+ return static_cast<int64_t>(unsigned_value);
+ }
+
+ constexpr HiRep& operator=(const int64_t value) {
+ // "If the destination type is unsigned, the resulting value is the
+ // smallest unsigned value equal to the source value modulo 2^n
+ // where `n` is the number of bits used to represent the destination
+ // type".
+ const auto unsigned_value = static_cast<uint64_t>(value);
+ hi_ = static_cast<uint32_t>(unsigned_value >> 32);
+ lo_ = static_cast<uint32_t>(unsigned_value);
+ return *this;
+ }
+
+ private:
+ // Notes:
+ // - Ideally we would use a `char[]` and `std::bit_cast`, but the latter
+ // does not exist (and is not constexpr in `y_absl`) before c++20.
+ // - Order is optimized depending on endianness so that the compiler can
+ // turn `Get()` (resp. `operator=()`) into a single 8-byte load (resp.
+ // store).
+#if defined(Y_ABSL_IS_BIG_ENDIAN) && Y_ABSL_IS_BIG_ENDIAN
+ uint32_t hi_;
+ uint32_t lo_;
+#else
+ uint32_t lo_;
+ uint32_t hi_;
+#endif
+ };
+ HiRep rep_hi_;
uint32_t rep_lo_;
};
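Why the split representation pays off (a sketch of the layout argument; sizes assume a typical ABI where int64_t is 8-byte aligned):

    #include <cstdint>

    // An 8-byte-aligned high word forces 4 bytes of tail padding after the
    // trailing uint32_t:
    struct WithInt64 { int64_t hi; uint32_t lo; };                       // sizeof == 16
    // Two 4-byte halves keep the alignment at 4, so no padding is needed:
    struct WithHalves { uint32_t hi_hi; uint32_t hi_lo; uint32_t lo; };  // sizeof == 12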
@@ -609,6 +687,12 @@ inline std::ostream& operator<<(std::ostream& os, Duration d) {
return os << FormatDuration(d);
}
+// Support for StrFormat(), StrCat() etc.
+template <typename Sink>
+void AbslStringify(Sink& sink, Duration d) {
+ sink.Append(FormatDuration(d));
+}
+
// ParseDuration()
//
// Parses a duration string consisting of a possibly signed sequence of
@@ -718,8 +802,7 @@ class Time {
// `y_absl::TimeZone`.
//
// Deprecated. Use `y_absl::TimeZone::CivilInfo`.
- struct
- Breakdown {
+ struct Y_ABSL_DEPRECATED("Use `y_absl::TimeZone::CivilInfo`.") Breakdown {
int64_t year; // year (e.g., 2013)
int month; // month of year [1:12]
int day; // day of month [1:31]
@@ -745,7 +828,10 @@ class Time {
// Returns the breakdown of this instant in the given TimeZone.
//
// Deprecated. Use `y_absl::TimeZone::At(Time)`.
+ Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+ Y_ABSL_DEPRECATED("Use `y_absl::TimeZone::At(Time)`.")
Breakdown In(TimeZone tz) const;
+ Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
template <typename H>
friend H AbslHashValue(H h, Time t) {
@@ -839,7 +925,8 @@ Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time InfinitePast() {
// FromUDate()
// FromUniversal()
//
-// Creates an `y_absl::Time` from a variety of other representations.
+// Creates an `y_absl::Time` from a variety of other representations. See
+// https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html
Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixNanos(int64_t ns);
Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMicros(int64_t us);
Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMillis(int64_t ms);
@@ -856,10 +943,12 @@ Y_ABSL_ATTRIBUTE_CONST_FUNCTION Time FromUniversal(int64_t universal);
// ToUDate()
// ToUniversal()
//
-// Converts an `y_absl::Time` to a variety of other representations. Note that
-// these operations round down toward negative infinity where necessary to
-// adjust to the resolution of the result type. Beware of possible time_t
-// over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
+// Converts an `y_absl::Time` to a variety of other representations. See
+// https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html
+//
+// Note that these operations round down toward negative infinity where
+// necessary to adjust to the resolution of the result type. Beware of
+// possible time_t over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
Y_ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixNanos(Time t);
Y_ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMicros(Time t);
Y_ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMillis(Time t);
@@ -1236,8 +1325,7 @@ Y_ABSL_ATTRIBUTE_PURE_FUNCTION inline Time FromCivil(CivilSecond ct,
// `y_absl::ConvertDateTime()`. Legacy version of `y_absl::TimeZone::TimeInfo`.
//
// Deprecated. Use `y_absl::TimeZone::TimeInfo`.
-struct
- TimeConversion {
+struct Y_ABSL_DEPRECATED("Use `y_absl::TimeZone::TimeInfo`.") TimeConversion {
Time pre; // time calculated using the pre-transition offset
Time trans; // when the civil-time discontinuity occurred
Time post; // time calculated using the post-transition offset
@@ -1271,8 +1359,11 @@ struct
// // y_absl::ToCivilDay(tc.pre, tz).day() == 1
//
// Deprecated. Use `y_absl::TimeZone::At(CivilSecond)`.
+Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+Y_ABSL_DEPRECATED("Use `y_absl::TimeZone::At(CivilSecond)`.")
TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
int min, int sec, TimeZone tz);
+Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// FromDateTime()
//
@@ -1289,9 +1380,12 @@ TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
// Deprecated. Use `y_absl::FromCivil(CivilSecond, TimeZone)`. Note that the
// behavior of `FromCivil()` differs from `FromDateTime()` for skipped civil
// times. If you care about that, see `y_absl::TimeZone::At(y_absl::CivilSecond)`.
-inline Time FromDateTime(int64_t year, int mon, int day, int hour,
- int min, int sec, TimeZone tz) {
+Y_ABSL_DEPRECATED("Use `y_absl::FromCivil(CivilSecond, TimeZone)`.")
+inline Time FromDateTime(int64_t year, int mon, int day, int hour, int min,
+ int sec, TimeZone tz) {
+ Y_ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
return ConvertDateTime(year, mon, day, hour, min, sec, tz).pre;
+ Y_ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
}
// FromTM()
@@ -1386,6 +1480,12 @@ inline std::ostream& operator<<(std::ostream& os, Time t) {
return os << FormatTime(t);
}
+// Support for StrFormat(), StrCat() etc.
+template <typename Sink>
+void AbslStringify(Sink& sink, Time t) {
+ sink.Append(FormatTime(t));
+}
+
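With AbslStringify defined for both Duration and Time, the formatting routines can consume them directly. A usage sketch, assuming this release's StrCat/StrFormat integration with AbslStringify:

    #include "y_absl/strings/str_cat.h"
    #include "y_absl/strings/str_format.h"
    #include "y_absl/time/clock.h"
    #include "y_absl/time/time.h"

    void StringifySketch() {
      y_absl::Duration d = y_absl::Milliseconds(1500);
      y_absl::Time t = y_absl::Now();
      // StrCat picks up AbslStringify-enabled types directly...
      auto s1 = y_absl::StrCat("elapsed: ", d);  // "elapsed: 1.5s"
      // ...and StrFormat accepts them through the type-deduced %v specifier.
      auto s2 = y_absl::StrFormat("%v elapsed since %v", d, t);
    }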
// ParseTime()
//
// Parses an input string according to the provided format string and
@@ -1491,7 +1591,7 @@ Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeNormalizedDuration(
// Provide access to the Duration representation.
Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d) {
- return d.rep_hi_;
+ return d.rep_hi_.Get();
}
Y_ABSL_ATTRIBUTE_CONST_FUNCTION constexpr uint32_t GetRepLo(Duration d) {
return d.rep_lo_;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
index 34ae94b275..faaf18711b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/optional.h
@@ -25,34 +25,6 @@
#include "y_absl/meta/type_traits.h"
#include "y_absl/utility/utility.h"
-// Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
-//
-// Inheriting constructors is supported in GCC 4.8+, Clang 3.3+ and MSVC 2015.
-// __cpp_inheriting_constructors is a predefined macro and a recommended way to
-// check for this language feature, but GCC doesn't support it until 5.0 and
-// Clang doesn't support it until 3.6.
-// Also, MSVC 2015 has a bug: it doesn't inherit the constexpr template
-// constructor. For example, the following code won't work on MSVC 2015 Update3:
-// struct Base {
-// int t;
-// template <typename T>
-// constexpr Base(T t_) : t(t_) {}
-// };
-// struct Foo : Base {
-// using Base::Base;
-// }
-// constexpr Foo foo(0); // doesn't work on MSVC 2015
-#if defined(__clang__)
-#if __has_feature(cxx_inheriting_constructors)
-#define Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
-#endif
-#elif (defined(__GNUC__) && \
- (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 8)) || \
- (__cpp_inheriting_constructors >= 200802) || \
- (defined(_MSC_VER) && _MSC_VER >= 1910)
-#define Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
-#endif
-
namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN
@@ -145,15 +117,7 @@ template <typename T>
class optional_data_base : public optional_data_dtor_base<T> {
protected:
using base = optional_data_dtor_base<T>;
-#ifdef Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
using base::base;
-#else
- optional_data_base() = default;
-
- template <typename... Args>
- constexpr explicit optional_data_base(in_place_t t, Args&&... args)
- : base(t, y_absl::forward<Args>(args)...) {}
-#endif
template <typename... Args>
void construct(Args&&... args) {
@@ -188,27 +152,13 @@ class optional_data;
template <typename T>
class optional_data<T, true> : public optional_data_base<T> {
protected:
-#ifdef Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
using optional_data_base<T>::optional_data_base;
-#else
- optional_data() = default;
-
- template <typename... Args>
- constexpr explicit optional_data(in_place_t t, Args&&... args)
- : optional_data_base<T>(t, y_absl::forward<Args>(args)...) {}
-#endif
};
template <typename T>
class optional_data<T, false> : public optional_data_base<T> {
protected:
-#ifdef Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
using optional_data_base<T>::optional_data_base;
-#else
- template <typename... Args>
- constexpr explicit optional_data(in_place_t t, Args&&... args)
- : optional_data_base<T>(t, y_absl::forward<Args>(args)...) {}
-#endif
optional_data() = default;
@@ -399,6 +349,4 @@ struct optional_hash_base<T, decltype(std::hash<y_absl::remove_const_t<T> >()(
Y_ABSL_NAMESPACE_END
} // namespace y_absl
-#undef Y_ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
-
#endif // Y_ABSL_TYPES_INTERNAL_OPTIONAL_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h
index 3b30eabf1f..09bf4e4995 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/span.h
@@ -88,7 +88,7 @@ using EnableIfMutable =
template <template <typename> class SpanT, typename T>
bool EqualImpl(SpanT<T> a, SpanT<T> b) {
static_assert(std::is_const<T>::value, "");
- return y_absl::equal(a.begin(), a.end(), b.begin(), b.end());
+ return std::equal(a.begin(), a.end(), b.begin(), b.end());
}
template <template <typename> class SpanT, typename T>
@@ -125,7 +125,7 @@ struct IsView<
};
// These enablers result in 'int' so they can be used as typenames or defaults
-// in template paramters lists.
+// in template parameter lists.
template <typename T>
using EnableIfIsView = std::enable_if_t<IsView<T>::value, int>;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
index 3e25015a99..c3e5cad1bc 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/internal/variant.h
@@ -877,8 +877,8 @@ struct IndexOfConstructedType<
template <std::size_t... Is>
struct ContainsVariantNPos
: y_absl::negation<std::is_same< // NOLINT
- y_absl::integer_sequence<bool, 0 <= Is...>,
- y_absl::integer_sequence<bool, Is != y_absl::variant_npos...>>> {};
+ std::integer_sequence<bool, 0 <= Is...>,
+ std::integer_sequence<bool, Is != y_absl::variant_npos...>>> {};
template <class Op, class... QualifiedVariants>
using RawVisitResult =
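The switch to std::integer_sequence preserves the underlying trick: `0 <= Is` holds for every std::size_t, so the first sequence is all-true, and the two sequence types differ exactly when some index equals variant_npos. A sketch, assuming the internal name is reachable as written:

    // ContainsVariantNPos<0, 1, y_absl::variant_npos> compares
    //   std::integer_sequence<bool, true, true, true>    (0 <= Is, always true)
    //   std::integer_sequence<bool, true, true, false>   (Is != variant_npos)
    // The types differ, so is_same is false and its negation reports "contains".
    static_assert(y_absl::variant_internal::ContainsVariantNPos<
                      0, 1, y_absl::variant_npos>::value, "");
    static_assert(
        !y_absl::variant_internal::ContainsVariantNPos<0, 1, 2>::value, "");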
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
index 2a71ecb6c4..2d4eefeec0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/optional.h
@@ -130,7 +130,7 @@ class optional : private optional_internal::optional_data<T>,
// Constructs an `optional` holding an empty value, NOT a default constructed
// `T`.
- constexpr optional() noexcept {}
+ constexpr optional() noexcept = default;
// Constructs an `optional` initialized with `nullopt` to hold an empty value.
constexpr optional(nullopt_t) noexcept {} // NOLINT(runtime/explicit)
@@ -357,7 +357,7 @@ class optional : private optional_internal::optional_data<T>,
template <typename... Args,
typename = typename std::enable_if<
std::is_constructible<T, Args&&...>::value>::type>
- T& emplace(Args&&... args) {
+ T& emplace(Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
this->destruct();
this->construct(std::forward<Args>(args)...);
return reference();
@@ -377,7 +377,8 @@ class optional : private optional_internal::optional_data<T>,
template <typename U, typename... Args,
typename = typename std::enable_if<std::is_constructible<
T, std::initializer_list<U>&, Args&&...>::value>::type>
- T& emplace(std::initializer_list<U> il, Args&&... args) {
+ T& emplace(std::initializer_list<U> il,
+ Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
this->destruct();
this->construct(il, std::forward<Args>(args)...);
return reference();
@@ -414,11 +415,11 @@ class optional : private optional_internal::optional_data<T>,
// `optional` is empty, behavior is undefined.
//
// If you need myOpt->foo in constexpr, use (*myOpt).foo instead.
- const T* operator->() const {
+ const T* operator->() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(this->engaged_);
return std::addressof(this->data_);
}
- T* operator->() {
+ T* operator->() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(this->engaged_);
return std::addressof(this->data_);
}
@@ -427,17 +428,17 @@ class optional : private optional_internal::optional_data<T>,
//
// Accesses the underlying `T` value of an `optional`. If the `optional` is
// empty, behavior is undefined.
- constexpr const T& operator*() const& {
+ constexpr const T& operator*() const& Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return Y_ABSL_HARDENING_ASSERT(this->engaged_), reference();
}
- T& operator*() & {
+ T& operator*() & Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(this->engaged_);
return reference();
}
- constexpr const T&& operator*() const && {
+ constexpr const T&& operator*() const&& Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return Y_ABSL_HARDENING_ASSERT(this->engaged_), y_absl::move(reference());
}
- T&& operator*() && {
+ T&& operator*() && Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
Y_ABSL_HARDENING_ASSERT(this->engaged_);
return std::move(reference());
}
@@ -472,23 +473,24 @@ class optional : private optional_internal::optional_data<T>,
// and lvalue/rvalue-ness of the `optional` is preserved to the view of
// the `T` sub-object. Throws `y_absl::bad_optional_access` when the `optional`
// is empty.
- constexpr const T& value() const & {
+ constexpr const T& value() const& Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference());
}
- T& value() & {
+ T& value() & Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
return static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference());
}
- T&& value() && { // NOLINT(build/c++11)
+ T&& value() && Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
return std::move(
static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference()));
}
- constexpr const T&& value() const && { // NOLINT(build/c++11)
+ constexpr const T&& value()
+ const&& Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
return y_absl::move(
static_cast<bool>(*this)
? reference()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h b/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
index 90745382dd..8096724218 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/types/span.h
@@ -296,8 +296,7 @@ class Span {
//
// Returns a reference to the i'th element of this span.
constexpr reference operator[](size_type i) const noexcept {
- // MSVC 2015 accepts this as constexpr, but not ptr_[i]
- return Y_ABSL_HARDENING_ASSERT(i < size()), *(data() + i);
+ return Y_ABSL_HARDENING_ASSERT(i < size()), ptr_[i];
}
// Span::at()
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make
index 26a3c44152..8981153caa 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/utility/ya.make
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20230125.3)
+VERSION(20230802.0)
-ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230125.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/abseil/abseil-cpp/archive/20230802.0.tar.gz)
NO_RUNTIME()